diff --git a/README.md b/README.md index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4f65397694faa1ab3a13b9ab8e5b740cfa92b5ca 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,108 @@ +
+ +## Open-YOLO 3D: Towards Fast and Accurate Open-Vocabulary 3D Instance Segmentation +
+ +
+ +
+ +
+Mohamed El Amine Boudjoghra¹, Angela Dai², Jean Lahoud¹, Hisham Cholakkal¹, Rao Muhammad Anwer¹,³, Salman Khan¹,⁴, Fahad Khan¹,⁵
+
+¹Mohamed Bin Zayed University of Artificial Intelligence (MBZUAI)  ²Technical University of Munich (TUM)  ³Aalto University  ⁴Australian National University  ⁵Linköping University
+
+ + +
+ +![paper](https://img.shields.io/badge/arXiv-Paper-.svg) + + +
+
+
+
+### News
+
+* **30 May 2024**: [Open-YOLO 3D](https://arxiv.org/abs/2406.02548) released on arXiv. 📝
+* **30 May 2024**: Code released. 💻
+
+### Abstract
+
+Recent works on open-vocabulary 3D instance segmentation show strong promise, but at the cost of slow inference speed and high computation requirements. This high computation cost is typically due to their heavy reliance on 3D CLIP features, which require computationally expensive 2D foundation models like Segment Anything (SAM) and CLIP for multi-view aggregation into 3D. As a consequence, this hampers their applicability in many real-world applications that require both fast and accurate predictions. To this end, we propose a fast yet accurate open-vocabulary 3D instance segmentation approach, named Open-YOLO 3D, that effectively leverages only 2D object detection from multi-view RGB images for open-vocabulary 3D instance segmentation.
+We address this task by generating class-agnostic 3D masks for objects in the scene and associating them with text prompts.
+We observe that the projection of class-agnostic 3D point cloud instances already holds instance information; thus, using SAM might only result in redundancy that unnecessarily increases the inference time.
+We empirically find that better and faster matching of text prompts to 3D masks can be achieved with a 2D object detector. We validate our Open-YOLO 3D on two benchmarks, ScanNet200 and Replica, under two scenarios: (i) with ground truth masks, where labels are required for given object proposals, and (ii) with class-agnostic 3D proposals generated from a 3D proposal network. Our Open-YOLO 3D achieves state-of-the-art performance on both datasets while obtaining up to a 16x speedup compared to the best existing method in the literature. On the ScanNet200 val. set, our Open-YOLO 3D achieves a mean average precision (mAP) of 24.7% while operating at 22 seconds per scene.
+
+### Qualitative results
+
+ +
+ +
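+### How it works (illustrative sketch)
+
+For intuition, the core idea from the abstract — project each class-agnostic 3D mask into the RGB views and score it against the boxes of a 2D open-vocabulary detector, with no SAM- or CLIP-based feature aggregation — can be sketched as follows. This is illustrative pseudocode only, not the code used in this repository (the actual implementation differs; see the paper and the code), and all function names and data structures below are placeholders.
+
+```python
+import numpy as np
+
+
+def label_3d_masks(mask_points_2d, detections, num_classes):
+    """Assign a class to each class-agnostic 3D mask.
+
+    mask_points_2d: {mask_id: {view_id: (P, 2) array of pixel coordinates obtained
+        by projecting the mask's visible 3D points into that view}}.
+    detections: {view_id: [((x0, y0, x1, y1), class_id, confidence), ...]} from any
+        open-vocabulary 2D detector prompted with the text labels.
+    """
+    labels = {}
+    for mask_id, views in mask_points_2d.items():
+        scores = np.zeros(num_classes)
+        for view_id, pts in views.items():
+            if len(pts) == 0:
+                continue
+            for (x0, y0, x1, y1), cls, conf in detections.get(view_id, []):
+                # Fraction of the projected instance covered by this detection box.
+                inside = ((pts[:, 0] >= x0) & (pts[:, 0] <= x1) &
+                          (pts[:, 1] >= y0) & (pts[:, 1] <= y1)).mean()
+                scores[cls] += conf * inside
+        # The class with the highest accumulated multi-view score wins.
+        labels[mask_id] = int(np.argmax(scores))
+    return labels
+```
+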
+
+
+## Installation guide
+
+Kindly check the [Installation guide](./docs/Installation.md) on how to set up the Conda environment and download the checkpoints, the pre-computed class-agnostic masks, and the ground-truth masks.
+
+## Data Preparation
+
+Kindly check the [Data Preparation guide](./docs/Data_prep.md) on how to prepare the ScanNet200 and Replica datasets.
+
+## Results reproducibility
+
+Kindly use the pre-computed class-agnostic masks we shared to reproduce the exact numbers reported in the paper.
+
+**Reproduce the results of ScanNet200 with pre-computed masks (using Mask3D)**
+```
+python run_evaluation.py --dataset_name scannet200 --path_to_3d_masks "./output/scannet200/scannet200_masks"
+```
+**Reproduce the results of ScanNet200 with oracle 3D masks (ground-truth 3D masks)**
+```
+python run_evaluation.py --dataset_name scannet200 --path_to_3d_masks "./output/scannet200/scannet200_ground_truth_masks" --is_gt
+```
+**Reproduce the results of Replica with pre-computed masks (using Mask3D)**
+```
+python run_evaluation.py --dataset_name replica --path_to_3d_masks "./output/replica/replica_masks"
+```
+**Reproduce the results of Replica with oracle 3D masks (ground-truth 3D masks)**
+```
+python run_evaluation.py --dataset_name replica --path_to_3d_masks "./output/replica/replica_ground_truth_masks" --is_gt
+```
+
+You can evaluate without our 3D class-agnostic masks, but the results may then vary from run to run because of sources of randomness in Mask3D's predictions, such as furthest point sampling. For results consistent with the ones reported in the paper, we recommend using our pre-computed masks.
+
+**Reproduce the results of Replica or ScanNet200 without using our pre-computed masks**
+```
+python run_evaluation.py --dataset_name $DATASET_NAME
+```
+
+## Single scene inference
+
+```python
+from utils import OpenYolo3D
+
+openyolo3d = OpenYolo3D("$(pwd)/pretrained/config.yaml")  # Initialize the model; the text prompts are defined in the config.
+prediction = openyolo3d.predict("$(pwd)/data/replica/office0", 6553.5)  # Predict the instance masks and labels (takes around 20 seconds in total).
+openyolo3d.save_output_as_ply("$(pwd)/sample/output.ply", True)  # Save the output scene as a .ply file; you can visualize it with MeshLab.
+```
+
+## Acknowledgments
+We would like to thank the authors of Mask3D and YOLO-World for their work, on which our model builds.
+ + +## BibTeX :pray: +``` +@misc{boudjoghra2024openyolo, + title={Open-YOLO 3D: Towards Fast and Accurate Open-Vocabulary 3D Instance Segmentation}, + author={Mohamed El Amine Boudjoghra and Angela Dai and Jean Lahoud and Hisham Cholakkal and Rao Muhammad Anwer and Salman Khan and Fahad Shahbaz Khan}, + year={2024}, + eprint={2406.02548}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ce639c16688b6ced1b83624da4a391a43ccf30e --- /dev/null +++ b/environment.yml @@ -0,0 +1,216 @@ +name: openyolo3d +channels: + - anaconda + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - blas=1.0=openblas + - boltons=23.0.0=py310h06a4308_0 + - brotlipy=0.7.0=py310h7f8727e_1002 + - bzip2=1.0.8=h7b6447c_0 + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py310h06a4308_0 + - cffi=1.15.1=py310h5eee18b_3 + - charset-normalizer=2.0.4=pyhd3eb1b0_0 + - conda=23.3.1=py310h06a4308_0 + - conda-content-trust=0.1.3=py310h06a4308_0 + - conda-package-handling=2.0.2=py310h06a4308_0 + - conda-package-streaming=0.7.0=py310h06a4308_0 + - cryptography=39.0.1=py310h9ce1e76_0 + - idna=3.4=py310h06a4308_0 + - jsonpatch=1.32=pyhd3eb1b0_0 + - jsonpointer=2.1=pyhd3eb1b0_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgfortran-ng=11.2.0=h00389a5_1 + - libgfortran5=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libopenblas=0.3.21=h043d6bf_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - libuuid=1.41.5=h5eee18b_0 + - ncurses=6.4=h6a678d5_0 + - nomkl=3.0=0 + - openblas-devel=0.3.21=h06a4308_0 + - openssl=1.1.1s=h7f8727e_0 + - packaging=23.0=py310h06a4308_0 + - pluggy=1.0.0=py310h06a4308_1 + - pycosat=0.6.4=py310h5eee18b_0 + - pycparser=2.21=pyhd3eb1b0_0 + - pyopenssl=23.0.0=py310h06a4308_0 + - pysocks=1.7.1=py310h06a4308_0 + - python=3.10.9=h7a1cb2a_0 + - readline=8.2=h5eee18b_0 + - requests=2.28.1=py310h06a4308_1 + - ruamel.yaml=0.17.21=py310h5eee18b_0 + - ruamel.yaml.clib=0.2.6=py310h5eee18b_1 + - setuptools=65.6.3=py310h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.2=h5eee18b_0 + - tk=8.6.12=h1ccaba5_0 + - toolz=0.12.0=py310h06a4308_0 + - tqdm=4.65.0=py310h2f386ee_0 + - urllib3=1.26.15=py310h06a4308_0 + - wheel=0.37.1=pyhd3eb1b0_0 + - xz=5.2.10=h5eee18b_1 + - zlib=1.2.13=h5eee18b_0 + - zstandard=0.19.0=py310h5eee18b_0 + - pip + - pip: + - absl-py==1.4.0 + - addict==2.4.0 + - aiohttp==3.8.4 + - aiosignal==1.3.1 + # - albumentations==1.2.1 #manual + - antlr4-python3-runtime==4.8 + - anyio==3.6.2 + - appdirs==1.4.4 + - asttokens==2.2.1 + - async-timeout==4.0.2 + - attrs==23.1.0 + - backcall==0.2.0 + - black==21.4b2 + - cachetools==5.3.0 + - click==8.1.3 + - cloudpickle==2.1.0 + - comm==0.1.3 + - configargparse==1.5.3 + - contourpy==1.0.7 + - cycler==0.11.0 + - dash==2.9.3 + - dash-core-components==2.0.0 + - dash-html-components==2.0.0 + - dash-table==5.0.0 + - debugpy==1.6.7 + - decorator==5.1.1 + # - detectron2==0.6 + - docker-pycreds==0.4.0 + - executing==1.2.0 + - fastapi==0.95.1 + - fastjsonschema==2.16.3 + - fire==0.4.0 + - flake8==6.0.0 + - flask==2.2.3 + - fonttools==4.39.3 + - frozenlist==1.3.3 + - fsspec==2023.4.0 + # - fvcore==0.1.5.post20220512 #manual + - gitdb==4.0.10 + - gitpython==3.1.31 + - google-auth==2.17.3 + - google-auth-oauthlib==1.0.0 + - grpcio==1.54.0 + - h11==0.14.0 + - hydra-core==1.0.5 + - imageio==2.21.1 + - importlib-metadata==3.10.1 + - iopath==0.1.10 + - 
ipykernel==6.22.0 + - ipython==8.12.0 + - ipywidgets==8.0.6 + - itsdangerous==2.1.2 + - jedi==0.18.2 + - jinja2==3.1.2 + - joblib==1.2.0 + - jsonschema==4.17.3 + - jupyter-client==8.2.0 + - jupyter-core==5.3.0 + - jupyterlab-widgets==3.0.7 + - kiwisolver==1.4.4 + - lazy-loader==0.2 + - loguru==0.6.0 + - markdown==3.4.3 + - markupsafe==2.1.2 + - matplotlib==3.7.1 + - matplotlib-inline==0.1.6 + # - minkowskiengine==0.5.4 + - multidict==6.0.4 + - mypy-extensions==1.0.0 + - natsort==8.3.1 + - nbformat==5.7.0 + - nest-asyncio==1.5.6 + - networkx==3.1 + - ninja==1.10.2.3 + - numpy==1.24.2 + - oauthlib==3.2.2 + # - omegaconf==2.0.6 #manual + # - open3d==0.17.0 #manual + - opencv-python-headless==4.7.0.72 + - pandas==2.0.0 + - parso==0.8.3 + - pathspec==0.11.1 + - pathtools==0.1.2 + - pexpect==4.8.0 + - pickleshare==0.7.5 + - pillow==9.5.0 + - pip==23.1 + - platformdirs==3.2.0 + - plotly==5.14.1 + - plyfile==0.7.4 + # - pointnet2==0.0.0 + - portalocker==2.7.0 + - prompt-toolkit==3.0.38 + - protobuf==4.22.3 + - psutil==5.9.5 + - ptyprocess==0.7.0 + - pure-eval==0.2.2 + - pyasn1==0.5.0 + - pyasn1-modules==0.3.0 + - pycocotools==2.0.4 + - pydantic==1.10.7 + - pydeprecate==0.3.2 + - pygments==2.15.1 + - pyparsing==3.0.9 + - pyquaternion==0.9.9 + - pyrsistent==0.19.3 + - python-dateutil==2.8.2 + - python-dotenv==0.20.0 + - python-multipart==0.0.6 + # - pytorch-lightning==1.7.2 + - pytz==2023.3 + - pyviz3d==0.2.28 + - pywavelets==1.4.1 + - pyyaml==5.3.1 + - pyzmq==25.0.2 + - qudida==0.0.4 + - regex==2023.3.23 + - requests-oauthlib==1.3.1 + - rsa==4.9 + - scikit-image==0.20.0 + - scikit-learn==1.1.2 + - scipy==1.9.0 + - sentry-sdk==1.20.0 + - setproctitle==1.3.2 + - smmap==5.0.0 + - sniffio==1.3.0 + - stack-data==0.6.2 + - starlette==0.26.1 + - tabulate==0.9.0 + - tenacity==8.2.2 + - tensorboard==2.12.2 + - tensorboard-data-server==0.7.0 + - tensorboard-plugin-wit==1.8.1 + - termcolor==2.2.0 + - threadpoolctl==3.1.0 + - tifffile==2023.4.12 + - toml==0.10.2 + # - torch==1.12.1+cu113 + # - torch-scatter==2.1.1 + # - torchmetrics==0.11.4 + # - torchvision==0.13.1+cu113 + - tornado==6.3 + - traitlets==5.9.0 + - trimesh==3.14.0 + - typing-extensions==4.5.0 + - tzdata==2023.3 + - uvicorn==0.21.1 + - volumentations==0.1.8 + - wandb==0.15.0 + - wcwidth==0.2.6 + - werkzeug==2.2.3 + - widgetsnbextension==4.0.7 + - yacs==0.1.8 + - yarl==1.8.2 + - zipp==3.15.0 +prefix: /opt/conda diff --git a/models/Mask3D/LICENSE b/models/Mask3D/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e619d905e048f45390e27e6fc2d93b6e96f1ea3b --- /dev/null +++ b/models/Mask3D/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2022 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/models/Mask3D/MANIFEST.in b/models/Mask3D/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..9ead0b59b546d425aeac6e46dba4278ef87eb3a7
--- /dev/null
+++ b/models/Mask3D/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include mask3d/conf *.yaml
\ No newline at end of file
diff --git a/models/Mask3D/README.md b/models/Mask3D/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e02d4a639970a937a899274858682e85b2a33de8
--- /dev/null
+++ b/models/Mask3D/README.md
@@ -0,0 +1,289 @@
+# Packaged version of Mask3D to be used in LabelMaker
+
+## Installation
+
+```
+# Some users experienced issues on Ubuntu with an AMD CPU
+# Install libopenblas-dev (issue #115, thanks WindWing)
+# sudo apt-get install libopenblas-dev
+
+export TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6"
+
+conda env create -f environment.yml
+
+conda activate mask3d_cuda113
+
+pip3 install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113
+pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.12.1+cu113.html
+pip3 install 'git+https://github.com/facebookresearch/detectron2.git@710e7795d0eeadf9def0e7ef957eea13532e34cf' --no-deps
+
+mkdir third_party
+cd third_party
+
+git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine"
+cd MinkowskiEngine
+git checkout 02fc608bea4c0549b0a7b00ca1bf15dee4a0b228
+python setup.py install --force_cuda --blas=openblas
+
+cd ..
+git clone https://github.com/ScanNet/ScanNet.git
+cd ScanNet/Segmentator
+git checkout 3e5726500896748521a6ceb81271b0f5b2c0e7d2
+make
+
+cd third_party/pointnet2
+python setup.py install
+
+cd ../../
+pip3 install pytorch-lightning==1.7.2
+
+pip install .
+
+```
+
+To use the model in your code, you need to download a checkpoint from the list below.
+Afterwards, the basic model can be used as follows:
+
+```python
+from mask3d import get_model
+
+model = get_model(checkpoint_path='checkpoints/scannet200/scannet200_benchmark.ckpt')
+```
+
+Here is a minimal example, assuming you have a point cloud in the `data` folder.
+
+```python
+import torch
+
+from mask3d import get_model, load_mesh, prepare_data, map_output_to_pointcloud, save_colorized_mesh
+
+model = get_model('checkpoints/scannet200/scannet200_benchmark.ckpt')
+model.eval()
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+# load input data
+pointcloud_file = 'data/pcl.ply'
+mesh = load_mesh(pointcloud_file)
+
+# prepare data
+data, points, colors, features, unique_map, inverse_map = prepare_data(mesh, device)
+
+# run model
+with torch.no_grad():
+    outputs = model(data, raw_coordinates=features)
+
+# map output to point cloud
+labels = map_output_to_pointcloud(mesh, outputs, inverse_map)
+
+# save colorized mesh
+save_colorized_mesh(mesh, labels, 'data/pcl_labelled.ply', colormap='scannet200')
+```
+
+So far, only ScanNet200 checkpoints are supported. We are working on the ScanNet checkpoints.
+
+# Original Information
+
+## Mask3D: Mask Transformer for 3D Instance Segmentation
+Jonas Schult1, Francis Engelmann2,3, Alexander Hermans1, Or Litany4, Siyu Tang3, Bastian Leibe1 + +1RWTH Aachen University 2ETH AI Center 3ETH Zurich 4NVIDIA + +Mask3D predicts accurate 3D semantic instances achieving state-of-the-art on ScanNet, ScanNet200, S3DIS and STPLS3D. + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/mask3d-for-3d-semantic-instance-segmentation/3d-instance-segmentation-on-scannetv2)](https://paperswithcode.com/sota/3d-instance-segmentation-on-scannetv2?p=mask3d-for-3d-semantic-instance-segmentation) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/mask3d-for-3d-semantic-instance-segmentation/3d-instance-segmentation-on-scannet200)](https://paperswithcode.com/sota/3d-instance-segmentation-on-scannet200?p=mask3d-for-3d-semantic-instance-segmentation) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/mask3d-for-3d-semantic-instance-segmentation/3d-instance-segmentation-on-s3dis)](https://paperswithcode.com/sota/3d-instance-segmentation-on-s3dis?p=mask3d-for-3d-semantic-instance-segmentation) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/mask3d-for-3d-semantic-instance-segmentation/3d-instance-segmentation-on-stpls3d)](https://paperswithcode.com/sota/3d-instance-segmentation-on-stpls3d?p=mask3d-for-3d-semantic-instance-segmentation) + +PyTorch +Lightning +Config: Hydra + +![teaser](./docs/teaser.jpg) + +
+

+ +[[Project Webpage](https://jonasschult.github.io/Mask3D/)] +[[Paper](https://arxiv.org/abs/2210.03105)] +[[Demo](https://francisengelmann.github.io/mask3d/)] + + +## News + +* **17. January 2023**: Mask3D is accepted at ICRA 2023. :fire: +* **14. October 2022**: STPLS3D support added. +* **10. October 2022**: Mask3D ranks 2nd on the [STPLS3D Challenge](https://codalab.lisn.upsaclay.fr/competitions/4646#results) hosted by the [Urban3D Workshop](https://urban3dchallenge.github.io/) at ECCV 2022. +* **6. October 2022**: [Mask3D preprint](https://arxiv.org/abs/2210.03105) released on arXiv. +* **25. September 2022**: Code released. + +## Code structure +We adapt the codebase of [Mix3D](https://github.com/kumuji/mix3d) which provides a highly modularized framework for 3D Semantic Segmentation based on the MinkowskiEngine. + +``` +├── mix3d +│ ├── main_instance_segmentation.py <- the main file +│ ├── conf <- hydra configuration files +│ ├── datasets +│ │ ├── preprocessing <- folder with preprocessing scripts +│ │ ├── semseg.py <- indoor dataset +│ │ └── utils.py +│ ├── models <- Mask3D modules +│ ├── trainer +│ │ ├── __init__.py +│ │ └── trainer.py <- train loop +│ └── utils +├── data +│ ├── processed <- folder for preprocessed datasets +│ └── raw <- folder for raw datasets +├── scripts <- train scripts +├── docs +├── README.md +└── saved <- folder that stores models and logs +``` + +### Dependencies :memo: +The main dependencies of the project are the following: +```yaml +python: 3.10.9 +cuda: 11.3 +``` +You can set up a conda environment as follows +``` +# Some users experienced issues on Ubuntu with an AMD CPU +# Install libopenblas-dev (issue #115, thanks WindWing) +# sudo apt-get install libopenblas-dev + +export TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6" + +conda env create -f environment.yml + +conda activate mask3d_cuda113 + +pip3 install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113 +pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.12.1+cu113.html +pip3 install 'git+https://github.com/facebookresearch/detectron2.git@710e7795d0eeadf9def0e7ef957eea13532e34cf' --no-deps + +mkdir third_party +cd third_party + +git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine" +cd MinkowskiEngine +git checkout 02fc608bea4c0549b0a7b00ca1bf15dee4a0b228 +python setup.py install --force_cuda --blas=openblas + +cd .. +git clone https://github.com/ScanNet/ScanNet.git +cd ScanNet/Segmentator +git checkout 3e5726500896748521a6ceb81271b0f5b2c0e7d2 +make + +cd ../../pointnet2 +python setup.py install + +cd ../../ +pip3 install pytorch-lightning==1.7.2 +``` + +### Data preprocessing :hammer: +After installing the dependencies, we preprocess the datasets. + +#### ScanNet / ScanNet200 +First, we apply Felzenswalb and Huttenlocher's Graph Based Image Segmentation algorithm to the test scenes using the default parameters. +Please refer to the [original repository](https://github.com/ScanNet/ScanNet/tree/master/Segmentator) for details. +Put the resulting segmentations in `./data/raw/scannet_test_segments`. +``` +python -m datasets.preprocessing.scannet_preprocessing preprocess \ +--data_dir="PATH_TO_RAW_SCANNET_DATASET" \ +--save_dir="data/processed/scannet" \ +--git_repo="PATH_TO_SCANNET_GIT_REPO" \ +--scannet200=false/true +``` + +#### S3DIS +The S3DIS dataset contains some smalls bugs which we initially fixed manually. We will soon release a preprocessing script which directly preprocesses the original dataset. 
For the time being, please follow the instructions [here](https://github.com/JonasSchult/Mask3D/issues/8#issuecomment-1279535948) to fix the dataset manually. Afterwards, call the preprocessing script as follows: + +``` +python -m datasets.preprocessing.s3dis_preprocessing preprocess \ +--data_dir="PATH_TO_Stanford3dDataset_v1.2" \ +--save_dir="data/processed/s3dis" +``` + +#### STPLS3D +``` +python -m datasets.preprocessing.stpls3d_preprocessing preprocess \ +--data_dir="PATH_TO_STPLS3D" \ +--save_dir="data/processed/stpls3d" +``` + +### Training and testing :train2: +Train Mask3D on the ScanNet dataset: +```bash +python main_instance_segmentation.py +``` +Please refer to the [config scripts](https://github.com/JonasSchult/Mask3D/tree/main/scripts) (for example [here](https://github.com/JonasSchult/Mask3D/blob/main/scripts/scannet/scannet_val.sh#L15)) for detailed instructions how to reproduce our results. +In the simplest case the inference command looks as follows: +```bash +python main_instance_segmentation.py \ +general.checkpoint='PATH_TO_CHECKPOINT.ckpt' \ +general.train_mode=false +``` + +## Trained checkpoints :floppy_disk: +We provide detailed scores and network configurations with trained checkpoints. + +### [S3DIS](http://buildingparser.stanford.edu/dataset.html) (pretrained on ScanNet train+val) +Following PointGroup, HAIS and SoftGroup, we finetune a model pretrained on ScanNet ([config](./scripts/scannet/scannet_pretrain_for_s3dis.sh) and [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/scannet_pretrained.ckpt)). +| Dataset | AP | AP_50 | AP_25 | Config | Checkpoint :floppy_disk: | Scores :chart_with_upwards_trend: | Visualizations :telescope: +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| Area 1 | 69.3 | 81.9 | 87.7 | [config](scripts/s3dis/s3dis_pretrained.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area1_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area1_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_1/) +| Area 2 | 44.0 | 59.5 | 66.5 | [config](scripts/s3dis/s3dis_pretrained.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area2_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area2_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_2/) +| Area 3 | 73.4 | 83.2 | 88.2 | [config](scripts/s3dis/s3dis_pretrained.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area3_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area3_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_3/) +| Area 4 | 58.0 | 69.5 | 74.9 | [config](scripts/s3dis/s3dis_pretrained.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area4_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area4_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_4/) +| Area 5 | 57.8 | 71.9 | 77.2 | [config](scripts/s3dis/s3dis_pretrained.sh) | 
[checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area5_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area5_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_5/) +| Area 6 | 68.4 | 79.9 | 85.2 | [config](scripts/s3dis/s3dis_pretrained.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/scannet_pretrained/area6_scannet_pretrained.ckpt) | [scores](./docs/detailed_scores/s3dis/scannet_pretrained/s3dis_area6_scannet_pretrained.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/scannet_pretrained/area_6/) + +### [S3DIS](http://buildingparser.stanford.edu/dataset.html) (from scratch) + +| Dataset | AP | AP_50 | AP_25 | Config | Checkpoint :floppy_disk: | Scores :chart_with_upwards_trend: | Visualizations :telescope: +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| Area 1 | 74.1 | 85.1 | 89.6 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area1_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area1_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_1/) +| Area 2 | 44.9 | 57.1 | 67.9 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area2_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area2_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_2/) +| Area 3 | 74.4 | 84.4 | 88.1 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area3_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area3_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_3/) +| Area 4 | 63.8 | 74.7 | 81.1 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area4_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area4_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_4/) +| Area 5 | 56.6 | 68.4 | 75.2 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area5_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area5_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_5/) +| Area 6 | 73.3 | 83.4 | 87.8 | [config](scripts/s3dis/s3dis_from_scratch.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/s3dis/from_scratch/area6_from_scratch.ckpt) | [scores](./docs/detailed_scores/s3dis/from_scratch/s3dis_area6_from_scratch.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/s3dis/from_scratch/area_6/) + +### [ScanNet v2](https://kaldir.vc.in.tum.de/scannet_benchmark/semantic_instance_3d?metric=ap) + +| Dataset | AP | AP_50 | AP_25 | Config | 
Checkpoint :floppy_disk: | Scores :chart_with_upwards_trend: | Visualizations :telescope: +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| ScanNet val | 55.2 | 73.7 | 83.5 | [config](scripts/scannet/scannet_val.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/scannet/scannet_val.ckpt) | [scores](./docs/detailed_scores/scannet_val.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/scannet/val/) +| ScanNet test | 56.6 | 78.0 | 87.0 | [config](scripts/scannet/scannet_benchmark.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/scannet/scannet_benchmark.ckpt) | [scores](http://kaldir.vc.in.tum.de/scannet_benchmark/result_details?id=1081) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/scannet/test/) + +### [ScanNet 200](https://kaldir.vc.in.tum.de/scannet_benchmark/scannet200_semantic_instance_3d) + +| Dataset | AP | AP_50 | AP_25 | Config | Checkpoint :floppy_disk: | Scores :chart_with_upwards_trend: | Visualizations :telescope: +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| ScanNet200 val | 27.4 | 37.0 | 42.3 | [config](scripts/scannet200/scannet200_val.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/scannet200/scannet200_val.ckpt) | [scores](./docs/detailed_scores/scannet200_val.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/scannet200/val/) +| ScanNet200 test | 27.8 | 38.8 | 44.5 | [config](scripts/scannet200/scannet200_benchmark.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/scannet200/scannet200_benchmark.ckpt) | [scores](https://kaldir.vc.in.tum.de/scannet_benchmark/result_details?id=1242) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/scannet200/test/) + +### [STPLS3D](https://www.stpls3d.com/) + +| Dataset | AP | AP_50 | AP_25 | Config | Checkpoint :floppy_disk: | Scores :chart_with_upwards_trend: | Visualizations :telescope: +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| STPLS3D val | 57.3 | 74.3 | 81.6 | [config](scripts/stpls3d/stpls3d_val.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/stpls3d/stpls3d_val.ckpt) | [scores](./docs/detailed_scores/stpls3d.txt) | [visualizations](https://omnomnom.vision.rwth-aachen.de/data/mask3d/visualizations/stpls3d/) +| STPLS3D test | 63.4 | 79.2 | 85.6 | [config](scripts/stpls3d/stpls3d_benchmark.sh) | [checkpoint](https://omnomnom.vision.rwth-aachen.de/data/mask3d/checkpoints/stpls3d/stpls3d_benchmark.zip) | [scores](https://codalab.lisn.upsaclay.fr/competitions/4646#results) | visualizations + +## BibTeX :pray: +``` +@article{Schult23ICRA, + title = {{Mask3D: Mask Transformer for 3D Semantic Instance Segmentation}}, + author = {Schult, Jonas and Engelmann, Francis and Hermans, Alexander and Litany, Or and Tang, Siyu and Leibe, Bastian}, + booktitle = {{International Conference on Robotics and Automation (ICRA)}}, + year = {2023} +} +``` diff --git a/models/Mask3D/__init__.py b/models/Mask3D/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/__init__.py b/models/Mask3D/build/lib/mask3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0b01a17620598f366cfa55c36a48609b1f0075f6 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/__init__.py @@ -0,0 +1,216 @@ +import hydra +import torch + +from mask3d.models.mask3d 
import Mask3D +from mask3d.utils.utils import ( + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) + +class InstanceSegmentation(torch.nn.Module): + def __init__(self, cfg): + super().__init__() + self.model = hydra.utils.instantiate(cfg.model) + + + def forward(self, x, raw_coordinates=None, point2segment=None): + return self.model(x, raw_coordinates=raw_coordinates, point2segment=point2segment) + + +from omegaconf import OmegaConf, DictConfig +import hydra +from hydra.core.global_hydra import GlobalHydra +from hydra.experimental import initialize, compose + +# imports for input loading +import albumentations as A +import MinkowskiEngine as ME +import numpy as np +import open3d as o3d + +# imports for output +from mask3d.datasets.scannet200.scannet200_constants import (VALID_CLASS_IDS_20, VALID_CLASS_IDS_200, SCANNET_COLOR_MAP_20, SCANNET_COLOR_MAP_200) + +def get_model(checkpoint_path=None, dataset_name = "scannet200"): + + + # Initialize the directory with config files + with initialize(config_path="conf"): + # Compose a configuration + cfg = compose(config_name="config_base_instance_segmentation.yaml") + + cfg.general.checkpoint = checkpoint_path + + # would be nicd to avoid this hardcoding below + # dataset_name = checkpoint_path.split('/')[-1].split('_')[0] + if dataset_name == 'scannet200': + cfg.general.num_targets = 201 + cfg.general.train_mode = False + cfg.general.eval_on_segments = True + cfg.general.topk_per_image = 300 + cfg.general.use_dbscan = True + cfg.general.dbscan_eps = 0.95 + cfg.general.export_threshold = 0.001 + + # # data + cfg.data.num_labels = 200 + cfg.data.test_mode = "validation" + + # # model + cfg.model.num_queries = 150 + + if dataset_name == 'scannet': + cfg.general.num_targets = 19 + cfg.general.train_mode = False + cfg.general.eval_on_segments = True + cfg.general.topk_per_image = 300 + cfg.general.use_dbscan = True + cfg.general.dbscan_eps = 0.95 + cfg.general.export_threshold = 0.001 + + # # data + cfg.data.num_labels = 20 + cfg.data.test_mode = "test" + + # # model + cfg.model.num_queries = 150 + + #TODO: this has to be fixed and discussed with Jonas + # cfg.model.scene_min = -3. + # cfg.model.scene_max = 3. + + # # Initialize the Hydra context + # hydra.core.global_hydra.GlobalHydra.instance().clear() + # hydra.initialize(config_path="conf") + + # Load the configuration + # cfg = hydra.compose(config_name="config_base_instance_segmentation.yaml") + model = InstanceSegmentation(cfg) + + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model) + + return model + + +def load_mesh(pcl_file): + + # load point cloud + input_mesh_path = pcl_file + mesh = o3d.io.read_triangle_mesh(input_mesh_path) + return mesh + +def prepare_data(mesh, device): + + # normalization for point cloud features + color_mean = (0.47793125906962, 0.4303257521323044, 0.3749598901421883) + color_std = (0.2834475483823543, 0.27566157565723015, 0.27018971370874995) + normalize_color = A.Normalize(mean=color_mean, std=color_std) + + + points = np.asarray(mesh.vertices) + colors = np.asarray(mesh.vertex_colors) + colors = colors * 255. 
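+    # Vertex colors from Open3D are floats in [0, 1]; they are scaled to [0, 255] above
+    # because albumentations' Normalize assumes 8-bit input (max_pixel_value=255 by default).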
+ + pseudo_image = colors.astype(np.uint8)[np.newaxis, :, :] + colors = np.squeeze(normalize_color(image=pseudo_image)["image"]) + + coords = np.floor(points / 0.02) + _, _, unique_map, inverse_map = ME.utils.sparse_quantize( + coordinates=coords, + features=colors, + return_index=True, + return_inverse=True, + ) + + sample_coordinates = coords[unique_map] + coordinates = [torch.from_numpy(sample_coordinates).int()] + sample_features = colors[unique_map] + features = [torch.from_numpy(sample_features).float()] + + coordinates, _ = ME.utils.sparse_collate(coords=coordinates, feats=features) + features = torch.cat(features, dim=0) + data = ME.SparseTensor( + coordinates=coordinates, + features=features, + device=device, + ) + + + return data, points, colors, features, unique_map, inverse_map + + +def map_output_to_pointcloud(mesh, + outputs, + inverse_map): + + # parse predictions + logits = outputs["pred_logits"] + masks = outputs["pred_masks"] + + # reformat predictions + logits = logits[0] + masks = masks[0] + + labels = [] + confidences = [] + masks_binary = [] + + for i in range(len(logits)): + p_labels = torch.softmax(logits[i], dim=-1) + p_masks = torch.sigmoid(masks[:, i]) + l = torch.argmax(p_labels, dim=-1) + c_label = torch.max(p_labels) + m = p_masks > 0.5 + c_m = p_masks[m].sum() / (m.sum() + 1e-8) + c = c_label * c_m + labels.append(l.item()) + confidences.append(c.item()) + masks_binary.append(m[inverse_map]) # mapping the mask back to the original point cloud + return (torch.stack(masks_binary), torch.tensor(confidences)) + +def save_colorized_mesh(mesh, labels_mapped, output_file, colormap='scannet'): + + # colorize mesh + colors = np.zeros((len(mesh.vertices), 3)) + for li in np.unique(labels_mapped): + if colormap == 'scannet': + raise ValueError('Not implemented yet') + elif colormap == 'scannet200': + v_li = VALID_CLASS_IDS_200[int(li)] + colors[(labels_mapped == li)[:, 0], :] = SCANNET_COLOR_MAP_200[v_li] + else: + raise ValueError('Unknown colormap - not supported') + + colors = colors / 255. 
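+    # The color map values are in [0, 255]; Open3D expects per-vertex colors as floats in [0, 1], hence the division above.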
+ mesh.vertex_colors = o3d.utility.Vector3dVector(colors) + o3d.io.write_triangle_mesh(output_file, mesh) + +if __name__ == '__main__': + + model = get_model('checkpoints/scannet200/scannet200_benchmark.ckpt') + model.eval() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.to(device) + + # load input data + pointcloud_file = 'data/pcl.ply' + mesh = load_mesh(pointcloud_file) + + # prepare data + data, points, colors, features, unique_map, inverse_map = prepare_data(mesh, device) + + # run model + with torch.no_grad(): + outputs = model(data, raw_coordinates=features) + + # map output to point cloud + labels = map_output_to_pointcloud(mesh, outputs, inverse_map) + + # save colorized mesh + save_colorized_mesh(mesh, labels, 'data/pcl_labelled.ply', colormap='scannet200') + \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/benchmark/__init__.py b/models/Mask3D/build/lib/mask3d/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/benchmark/evaluate_semantic_instance.py b/models/Mask3D/build/lib/mask3d/benchmark/evaluate_semantic_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..242cb87a09b5c69a0d967217a2cd97706197a63d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/benchmark/evaluate_semantic_instance.py @@ -0,0 +1,1141 @@ +# Evaluates semantic instance task +# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation +# Input: +# - path to .txt prediction files +# - path to .txt ground truth files +# - output file to write results to +# Each .txt prediction file look like: +# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] +# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] +# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] +# ... +# +# NOTE: The prediction files must live in the root of the given prediction path. +# Predicted mask .txt files must live in a subfolder. +# Additionally, filenames must not contain spaces. +# The relative paths to predicted masks must contain one integer per line, +# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). +# Non-zero integers indicate part of the predicted instance. +# The label ids specify the class of the corresponding mask. +# Confidence is a float confidence score of the mask. +# +# Note that only the valid classes are used for evaluation, +# i.e., any ground truth label not in the valid label set +# is ignored in the evaluation. 
+# +# example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] + +# python imports +import math +import os, sys, argparse +import inspect +from copy import deepcopy +from uuid import uuid4 + +import torch + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) + +from scipy import stats + +# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +# parentdir = os.path.dirname(currentdir) +# sys.path.insert(0,parentdir) +import benchmark.util as util +import benchmark.util_3d as util_3d + +# parser = argparse.ArgumentParser() +# parser.add_argument('--gt_path', default='', help='path to directory of gt .txt files') +# parser.add_argument('--output_file', default='', help='output file [default: ./semantic_instance_evaluation.txt]') +# opt = parser.parse_args() + +# if opt.output_file == '': +# opt.output_file = os.path.join(os.getcwd(), 'semantic_instance_evaluation.txt') + + +# ---------- Label info ---------- # +CLASS_LABELS = [ + "cabinet", + "bed", + "chair", + "sofa", + "table", + "door", + "window", + "bookshelf", + "picture", + "counter", + "desk", + "curtain", + "refrigerator", + "shower curtain", + "toilet", + "sink", + "bathtub", + "otherfurniture", +] +VALID_CLASS_IDS = np.array( + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39] +) +ID_TO_LABEL = {} +LABEL_TO_ID = {} +for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] +# ---------- Evaluation params ---------- # +# overlaps for evaluation +opt = {} +opt["overlaps"] = np.append(np.arange(0.5, 0.95, 0.05), 0.25) +# minimum region size for evaluation [verts] +opt["min_region_sizes"] = np.array([100]) # 100 for s3dis, scannet +# distance thresholds [m] +opt["distance_threshes"] = np.array([float("inf")]) +# distance confidences +opt["distance_confs"] = np.array([-float("inf")]) + + +def evaluate_matches(matches): + overlaps = opt["overlaps"] + min_region_sizes = [opt["min_region_sizes"][0]] + dist_threshes = [opt["distance_threshes"][0]] + dist_confs = [opt["distance_confs"][0]] + + # results: class x overlap + ap = np.zeros( + (len(dist_threshes), len(CLASS_LABELS), len(overlaps)), float + ) + for di, (min_region_size, distance_thresh, distance_conf) in enumerate( + zip(min_region_sizes, dist_threshes, dist_confs) + ): + for oi, overlap_th in enumerate(overlaps): + pred_visited = {} + for m in matches: + for p in matches[m]["pred"]: + for label_name in CLASS_LABELS: + for p in matches[m]["pred"][label_name]: + if "uuid" in p: + pred_visited[p["uuid"]] = False + for li, label_name in enumerate(CLASS_LABELS): + y_true = np.empty(0) + y_score = np.empty(0) + hard_false_negatives = 0 + has_gt = False + has_pred = False + for m in matches: + pred_instances = matches[m]["pred"][label_name] + gt_instances = matches[m]["gt"][label_name] + # filter groups in ground truth + gt_instances = [ + gt + for gt in gt_instances + if gt["instance_id"] >= 1000 + and gt["vert_count"] >= min_region_size + and gt["med_dist"] <= distance_thresh + and gt["dist_conf"] >= distance_conf + ] + if gt_instances: + has_gt = True + if pred_instances: + has_pred = True + + cur_true = np.ones(len(gt_instances)) + cur_score = np.ones(len(gt_instances)) * (-float("inf")) + cur_match = np.zeros(len(gt_instances), dtype=bool) + # collect matches + for (gti, gt) in enumerate(gt_instances): + found_match = False + num_pred = len(gt["matched_pred"]) 
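+                        # Greedy matching: this gt is claimed by the first unvisited prediction
+                        # whose IoU exceeds overlap_th; any further overlapping prediction is
+                        # recorded as a false positive with the lower of the two scores.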
+ for pred in gt["matched_pred"]: + # greedy assignments + if pred_visited[pred["uuid"]]: + continue + overlap = float(pred["intersection"]) / ( + gt["vert_count"] + + pred["vert_count"] + - pred["intersection"] + ) + if overlap > overlap_th: + confidence = pred["confidence"] + # if already have a prediction for this gt, + # the prediction with the lower score is automatically a false positive + if cur_match[gti]: + max_score = max(cur_score[gti], confidence) + min_score = min(cur_score[gti], confidence) + cur_score[gti] = max_score + # append false positive + cur_true = np.append(cur_true, 0) + cur_score = np.append(cur_score, min_score) + cur_match = np.append(cur_match, True) + # otherwise set score + else: + found_match = True + cur_match[gti] = True + cur_score[gti] = confidence + pred_visited[pred["uuid"]] = True + if not found_match: + hard_false_negatives += 1 + # remove non-matched ground truth instances + cur_true = cur_true[cur_match == True] + cur_score = cur_score[cur_match == True] + + # collect non-matched predictions as false positive + for pred in pred_instances: + found_gt = False + for gt in pred["matched_gt"]: + overlap = float(gt["intersection"]) / ( + gt["vert_count"] + + pred["vert_count"] + - gt["intersection"] + ) + if overlap > overlap_th: + found_gt = True + break + if not found_gt: + num_ignore = pred["void_intersection"] + for gt in pred["matched_gt"]: + # group? + if gt["instance_id"] < 1000: + num_ignore += gt["intersection"] + # small ground truth instances + if ( + gt["vert_count"] < min_region_size + or gt["med_dist"] > distance_thresh + or gt["dist_conf"] < distance_conf + ): + num_ignore += gt["intersection"] + proportion_ignore = ( + float(num_ignore) / pred["vert_count"] + ) + # if not ignored append false positive + if proportion_ignore <= overlap_th: + cur_true = np.append(cur_true, 0) + confidence = pred["confidence"] + cur_score = np.append(cur_score, confidence) + + # append to overall results + y_true = np.append(y_true, cur_true) + y_score = np.append(y_score, cur_score) + + # compute average precision + if has_gt and has_pred: + # compute precision recall curve first + + # sorting and cumsum + score_arg_sort = np.argsort(y_score) + y_score_sorted = y_score[score_arg_sort] + y_true_sorted = y_true[score_arg_sort] + y_true_sorted_cumsum = np.cumsum(y_true_sorted) + + # unique thresholds + (thresholds, unique_indices) = np.unique( + y_score_sorted, return_index=True + ) + num_prec_recall = len(unique_indices) + 1 + + # prepare precision recall + num_examples = len(y_score_sorted) + # https://github.com/ScanNet/ScanNet/pull/26 + # all predictions are non-matched but also all of them are ignored and not counted as FP + # y_true_sorted_cumsum is empty + # num_true_examples = y_true_sorted_cumsum[-1] + num_true_examples = ( + y_true_sorted_cumsum[-1] + if len(y_true_sorted_cumsum) > 0 + else 0 + ) + precision = np.zeros(num_prec_recall) + recall = np.zeros(num_prec_recall) + + # deal with the first point + y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0) + # deal with remaining + for idx_res, idx_scores in enumerate(unique_indices): + cumsum = y_true_sorted_cumsum[idx_scores - 1] + tp = num_true_examples - cumsum + fp = num_examples - idx_scores - tp + fn = cumsum + hard_false_negatives + p = float(tp) / (tp + fp) + r = float(tp) / (tp + fn) + precision[idx_res] = p + recall[idx_res] = r + + # first point in curve is artificial + precision[-1] = 1.0 + recall[-1] = 0.0 + + # compute average of precision-recall curve + recall_for_conv = 
np.copy(recall) + recall_for_conv = np.append( + recall_for_conv[0], recall_for_conv + ) + recall_for_conv = np.append(recall_for_conv, 0.0) + + stepWidths = np.convolve( + recall_for_conv, [-0.5, 0, 0.5], "valid" + ) + # integrate is now simply a dot product + ap_current = np.dot(precision, stepWidths) + + elif has_gt: + ap_current = 0.0 + else: + ap_current = float("nan") + ap[di, li, oi] = ap_current + return ap + + +def compute_averages(aps): + d_inf = 0 + o50 = np.where(np.isclose(opt["overlaps"], 0.5)) + o25 = np.where(np.isclose(opt["overlaps"], 0.25)) + oAllBut25 = np.where(np.logical_not(np.isclose(opt["overlaps"], 0.25))) + avg_dict = {} + # avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) + avg_dict["all_ap"] = np.nanmean(aps[d_inf, :, oAllBut25]) + avg_dict["all_ap_50%"] = np.nanmean(aps[d_inf, :, o50]) + avg_dict["all_ap_25%"] = np.nanmean(aps[d_inf, :, o25]) + avg_dict["classes"] = {} + for (li, label_name) in enumerate(CLASS_LABELS): + avg_dict["classes"][label_name] = {} + # avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) + avg_dict["classes"][label_name]["ap"] = np.average( + aps[d_inf, li, oAllBut25] + ) + avg_dict["classes"][label_name]["ap50%"] = np.average( + aps[d_inf, li, o50] + ) + avg_dict["classes"][label_name]["ap25%"] = np.average( + aps[d_inf, li, o25] + ) + return avg_dict + + +def make_pred_info(pred: dict): + # pred = {'pred_scores' = 100, 'pred_classes' = 100 'pred_masks' = Nx100} + pred_info = {} + assert ( + pred["pred_classes"].shape[0] + == pred["pred_scores"].shape[0] + == pred["pred_masks"].shape[1] + ) + for i in range(len(pred["pred_classes"])): + info = {} + info["label_id"] = pred["pred_classes"][i] + info["conf"] = pred["pred_scores"][i] + info["mask"] = pred["pred_masks"][:, i] + pred_info[uuid4()] = info # we later need to identify these objects + return pred_info + + +def assign_instances_for_scan(pred: dict, gt_file: str): + pred_info = make_pred_info(pred) + try: + gt_ids = util_3d.load_ids(gt_file) + except Exception as e: + util.print_error("unable to load " + gt_file + ": " + str(e)) + + # get gt instances + gt_instances = util_3d.get_instances( + gt_ids, VALID_CLASS_IDS, CLASS_LABELS, ID_TO_LABEL + ) + # associate + gt2pred = deepcopy(gt_instances) + for label in gt2pred: + for gt in gt2pred[label]: + gt["matched_pred"] = [] + pred2gt = {} + for label in CLASS_LABELS: + pred2gt[label] = [] + num_pred_instances = 0 + # mask of void labels in the groundtruth + bool_void = np.logical_not(np.in1d(gt_ids // 1000, VALID_CLASS_IDS)) + # go thru all prediction masks + for uuid in pred_info: + label_id = int(pred_info[uuid]["label_id"]) + conf = pred_info[uuid]["conf"] + if not label_id in ID_TO_LABEL: + continue + label_name = ID_TO_LABEL[label_id] + # read the mask + pred_mask = pred_info[uuid]["mask"] + assert len(pred_mask) == len(gt_ids) + # convert to binary + pred_mask = np.not_equal(pred_mask, 0) + num = np.count_nonzero(pred_mask) + if num < opt["min_region_sizes"][0]: + continue # skip if empty + + pred_instance = {} + pred_instance["uuid"] = uuid + pred_instance["pred_id"] = num_pred_instances + pred_instance["label_id"] = label_id + pred_instance["vert_count"] = num + pred_instance["confidence"] = conf + pred_instance["void_intersection"] = np.count_nonzero( + np.logical_and(bool_void, pred_mask) + ) + + # matched gt instances + matched_gt = [] + # go thru all gt instances with matching label + for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): + intersection = np.count_nonzero( + np.logical_and(gt_ids == 
gt_inst["instance_id"], pred_mask) + ) + if intersection > 0: + gt_copy = gt_inst.copy() + pred_copy = pred_instance.copy() + gt_copy["intersection"] = intersection + pred_copy["intersection"] = intersection + matched_gt.append(gt_copy) + gt2pred[label_name][gt_num]["matched_pred"].append(pred_copy) + pred_instance["matched_gt"] = matched_gt + num_pred_instances += 1 + pred2gt[label_name].append(pred_instance) + + return gt2pred, pred2gt + + +def print_results(avgs): + sep = "" + col1 = ":" + lineLen = 64 + + print("") + print("#" * lineLen) + line = "" + line += "{:<15}".format("what") + sep + col1 + line += "{:>15}".format("AP") + sep + line += "{:>15}".format("AP_50%") + sep + line += "{:>15}".format("AP_25%") + sep + print(line) + print("#" * lineLen) + + for (li, label_name) in enumerate(CLASS_LABELS): + ap_avg = avgs["classes"][label_name]["ap"] + ap_50o = avgs["classes"][label_name]["ap50%"] + ap_25o = avgs["classes"][label_name]["ap25%"] + line = "{:<15}".format(label_name) + sep + col1 + line += sep + "{:>15.3f}".format(ap_avg) + sep + line += sep + "{:>15.3f}".format(ap_50o) + sep + line += sep + "{:>15.3f}".format(ap_25o) + sep + print(line) + + all_ap_avg = avgs["all_ap"] + all_ap_50o = avgs["all_ap_50%"] + all_ap_25o = avgs["all_ap_25%"] + + print("-" * lineLen) + line = "{:<15}".format("average") + sep + col1 + line += "{:>15.3f}".format(all_ap_avg) + sep + line += "{:>15.3f}".format(all_ap_50o) + sep + line += "{:>15.3f}".format(all_ap_25o) + sep + print(line) + print("") + + +def write_result_file(avgs, filename): + _SPLITTER = "," + with open(filename, "w") as f: + f.write( + _SPLITTER.join(["class", "class id", "ap", "ap50", "ap25"]) + "\n" + ) + for i in range(len(VALID_CLASS_IDS)): + class_name = CLASS_LABELS[i] + class_id = VALID_CLASS_IDS[i] + ap = avgs["classes"][class_name]["ap"] + ap50 = avgs["classes"][class_name]["ap50%"] + ap25 = avgs["classes"][class_name]["ap25%"] + f.write( + _SPLITTER.join( + [str(x) for x in [class_name, class_id, ap, ap50, ap25]] + ) + + "\n" + ) + + +def evaluate( + preds: dict, gt_path: str, output_file: str, dataset: str = "scannet" +): + global CLASS_LABELS + global VALID_CLASS_IDS + global ID_TO_LABEL + global LABEL_TO_ID + global opt + + if dataset == "stpls3d": + # global CLASS_LABELS + # global VALID_CLASS_IDS + # global ID_TO_LABEL + # global LABEL_TO_ID + + opt["min_region_sizes"] = np.array([10]) + + CLASS_LABELS = [ + "Build", + "LowVeg", + "MediumVeg", + "HighVeg", + "Vehicle", + "Truck", + "Aircraft", + "MilitaryVeh", + "Bike", + "Motorcycle", + "LightPole", + "StreetSign", + "Clutter", + "Fence", + ] + VALID_CLASS_IDS = np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + ) + + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + if dataset == "s3dis": + # global CLASS_LABELS + # global VALID_CLASS_IDS + # global ID_TO_LABEL + # global LABEL_TO_ID + + CLASS_LABELS = [ + "ceiling", + "floor", + "wall", + "beam", + "column", + "window", + "door", + "table", + "chair", + "sofa", + "bookcase", + "board", + "clutter", + ] + VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]) + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + if dataset == "scannet200": + CLASS_LABELS = ( + "chair", + "table", + "door", + "couch", + "cabinet", + "shelf", + 
"desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "bicycle", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "candle", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "guitar case", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", + "shower floor", + "water pitcher", + "mailbox", + "bowl", + "paper bag", + "alarm clock", + "music stand", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "cd case", + "closet rod", + "coffee kettle", + "structure", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "storage organizer", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "luggage", + "mattress", + ) + + VALID_CLASS_IDS = np.array( + ( + 2, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, + 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 121, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, 
+ 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 221, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 286, + 300, + 304, + 312, + 323, + 325, + 331, + 342, + 356, + 370, + 392, + 395, + 399, + 408, + 417, + 488, + 540, + 562, + 570, + 572, + 581, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1174, + 1175, + 1176, + 1178, + 1179, + 1180, + 1181, + 1182, + 1183, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1190, + 1191, + ) + ) + + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + total_true = 0 + total_seen = 0 + NUM_CLASSES = len(VALID_CLASS_IDS) + + true_positive_classes = np.zeros(NUM_CLASSES) + positive_classes = np.zeros(NUM_CLASSES) + gt_classes = np.zeros(NUM_CLASSES) + + # precision & recall + total_gt_ins = np.zeros(NUM_CLASSES) + at = 0.5 + tpsins = [[] for _ in range(NUM_CLASSES)] + fpsins = [[] for _ in range(NUM_CLASSES)] + # mucov and mwcov + all_mean_cov = [[] for _ in range(NUM_CLASSES)] + all_mean_weighted_cov = [[] for _ in range(NUM_CLASSES)] + + print("evaluating", len(preds), "scans...") + matches = {} + for i, (k, v) in enumerate(preds.items()): + gt_file = os.path.join(gt_path, k + ".txt") + if not os.path.isfile(gt_file): + util.print_error( + "Scan {} does not match any gt file".format(k), user_fault=True + ) + + if dataset == "s3dis": + gt_ids = util_3d.load_ids(gt_file) + gt_sem = (gt_ids // 1000) - 1 + gt_ins = gt_ids - (gt_ids // 1000) * 1000 + + # pred_sem = v['pred_classes'] - 1 + pred_sem = np.zeros(v["pred_masks"].shape[0], dtype=np.int64) + # TODO CONTINUE HERE!!!!!!!!!!!!! + pred_ins = np.zeros(v["pred_masks"].shape[0], dtype=np.int64) + + for inst_id in reversed(range(v["pred_masks"].shape[1])): + point_ids = np.argwhere(v["pred_masks"][:, inst_id] == 1.0)[ + :, 0 + ] + pred_ins[point_ids] = inst_id + 1 + pred_sem[point_ids] = v["pred_classes"][inst_id] - 1 + + # semantic acc + total_true += np.sum(pred_sem == gt_sem) + total_seen += pred_sem.shape[0] + + # TODO PARALLELIZE THIS!!!!!!! 
+ # pn semantic mIoU + """ + for j in range(gt_sem.shape[0]): + gt_l = int(gt_sem[j]) + pred_l = int(pred_sem[j]) + gt_classes[gt_l] += 1 + positive_classes[pred_l] += 1 + true_positive_classes[gt_l] += int(gt_l == pred_l) + """ + + uniq, counts = np.unique(pred_sem, return_counts=True) + positive_classes[uniq] += counts + + uniq, counts = np.unique(gt_sem, return_counts=True) + gt_classes[uniq] += counts + + uniq, counts = np.unique( + gt_sem[pred_sem == gt_sem], return_counts=True + ) + true_positive_classes[uniq] += counts + + # instance + un = np.unique(pred_ins) + pts_in_pred = [[] for _ in range(NUM_CLASSES)] + for ig, g in enumerate(un): # each object in prediction + if g == -1: + continue + tmp = pred_ins == g + sem_seg_i = int(stats.mode(pred_sem[tmp])[0]) + pts_in_pred[sem_seg_i] += [tmp] + + un = np.unique(gt_ins) + pts_in_gt = [[] for _ in range(NUM_CLASSES)] + for ig, g in enumerate(un): + tmp = gt_ins == g + sem_seg_i = int(stats.mode(gt_sem[tmp])[0]) + pts_in_gt[sem_seg_i] += [tmp] + + # instance mucov & mwcov + for i_sem in range(NUM_CLASSES): + sum_cov = 0 + mean_cov = 0 + mean_weighted_cov = 0 + num_gt_point = 0 + for ig, ins_gt in enumerate(pts_in_gt[i_sem]): + ovmax = 0.0 + num_ins_gt_point = np.sum(ins_gt) + num_gt_point += num_ins_gt_point + for ip, ins_pred in enumerate(pts_in_pred[i_sem]): + union = ins_pred | ins_gt + intersect = ins_pred & ins_gt + iou = float(np.sum(intersect)) / np.sum(union) + + if iou > ovmax: + ovmax = iou + ipmax = ip + + sum_cov += ovmax + mean_weighted_cov += ovmax * num_ins_gt_point + + if len(pts_in_gt[i_sem]) != 0: + mean_cov = sum_cov / len(pts_in_gt[i_sem]) + all_mean_cov[i_sem].append(mean_cov) + + mean_weighted_cov /= num_gt_point + all_mean_weighted_cov[i_sem].append(mean_weighted_cov) + + if dataset == "s3dis": + # instance precision & recall + for i_sem in range(NUM_CLASSES): + tp = [0.0] * len(pts_in_pred[i_sem]) + fp = [0.0] * len(pts_in_pred[i_sem]) + gtflag = np.zeros(len(pts_in_gt[i_sem])) + total_gt_ins[i_sem] += len(pts_in_gt[i_sem]) + + for ip, ins_pred in enumerate(pts_in_pred[i_sem]): + ovmax = -1.0 + + for ig, ins_gt in enumerate(pts_in_gt[i_sem]): + union = ins_pred | ins_gt + intersect = ins_pred & ins_gt + iou = float(np.sum(intersect)) / np.sum(union) + + if iou > ovmax: + ovmax = iou + igmax = ig + + if ovmax >= at: + tp[ip] = 1 # true positive + else: + fp[ip] = 1 # false positive + + tpsins[i_sem] += tp + fpsins[i_sem] += fp + + matches_key = os.path.abspath(gt_file) + # assign gt to predictions + gt2pred, pred2gt = assign_instances_for_scan(v, gt_file) + matches[matches_key] = {} + matches[matches_key]["gt"] = gt2pred + matches[matches_key]["pred"] = pred2gt + sys.stdout.write("\rscans processed: {}".format(i + 1)) + sys.stdout.flush() + print("") + ap_scores = evaluate_matches(matches) + avgs = compute_averages(ap_scores) + + # print + print_results(avgs) + write_result_file(avgs, output_file) + + if dataset == "s3dis": + MUCov = np.zeros(NUM_CLASSES) + MWCov = np.zeros(NUM_CLASSES) + for i_sem in range(NUM_CLASSES): + MUCov[i_sem] = np.mean(all_mean_cov[i_sem]) + MWCov[i_sem] = np.mean(all_mean_weighted_cov[i_sem]) + + precision = np.zeros(NUM_CLASSES) + recall = np.zeros(NUM_CLASSES) + for i_sem in range(NUM_CLASSES): + tp = np.asarray(tpsins[i_sem]).astype(np.float64) + fp = np.asarray(fpsins[i_sem]).astype(np.float64) + tp = np.sum(tp) + fp = np.sum(fp) + rec = tp / total_gt_ins[i_sem] + prec = tp / (tp + fp) + + precision[i_sem] = prec + recall[i_sem] = rec + + """ + LOG_FOUT = open(os.path.join('results_a5.txt'), 
'w') + + def log_string(out_str): + LOG_FOUT.write(out_str + '\n') + LOG_FOUT.flush() + print(out_str) + """ + + return np.mean(precision), np.mean(recall) + + +# TODO: remove this +# import pandas as pd +# def main(): +# print("!!! CLI is only for debugging purposes. use `evaluate()` instead.") +# evaluate(pd.read_pickle("/globalwork/schult/saved_predictions.pkl"), opt.gt_path, opt.output_file) + +# if __name__ == '__main__': +# main() diff --git a/models/Mask3D/build/lib/mask3d/benchmark/util.py b/models/Mask3D/build/lib/mask3d/benchmark/util.py new file mode 100644 index 0000000000000000000000000000000000000000..9a4224cd4f785c8a5a7cde490cf0f9999e61dbe7 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/benchmark/util.py @@ -0,0 +1,128 @@ +import os, sys +import csv + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) +try: + import imageio +except: + print("Please install the module 'imageio' for image processing, e.g.") + print("pip install imageio") + sys.exit(-1) + +# print an error message and quit +def print_error(message, user_fault=False): + sys.stderr.write("ERROR: " + str(message) + "\n") + if user_fault: + sys.exit(2) + sys.exit(-1) + + +# if string s represents an int +def represents_int(s): + try: + int(s) + return True + except ValueError: + return False + + +def read_label_mapping( + filename, label_from="raw_category", label_to="nyu40id" +): + assert os.path.isfile(filename) + mapping = dict() + with open(filename) as csvfile: + reader = csv.DictReader(csvfile, delimiter="\t") + for row in reader: + mapping[row[label_from]] = int(row[label_to]) + # if ints convert + if represents_int(list(mapping.keys())[0]): + mapping = {int(k): v for k, v in mapping.items()} + return mapping + + +# input: scene_types.txt or scene_types_all.txt +def read_scene_types_mapping(filename, remove_spaces=True): + assert os.path.isfile(filename) + mapping = dict() + lines = open(filename).read().splitlines() + lines = [line.split("\t") for line in lines] + if remove_spaces: + mapping = {x[1].strip(): int(x[0]) for x in lines} + else: + mapping = {x[1]: int(x[0]) for x in lines} + return mapping + + +# color by label +def visualize_label_image(filename, image): + height = image.shape[0] + width = image.shape[1] + vis_image = np.zeros([height, width, 3], dtype=np.uint8) + color_palette = create_color_palette() + for idx, color in enumerate(color_palette): + vis_image[image == idx] = color + imageio.imwrite(filename, vis_image) + + +# color by different instances (mod length of color palette) +def visualize_instance_image(filename, image): + height = image.shape[0] + width = image.shape[1] + vis_image = np.zeros([height, width, 3], dtype=np.uint8) + color_palette = create_color_palette() + instances = np.unique(image) + for idx, inst in enumerate(instances): + vis_image[image == inst] = color_palette[inst % len(color_palette)] + imageio.imwrite(filename, vis_image) + + +# color palette for nyu40 labels +def create_color_palette(): + return [ + (0, 0, 0), + (174, 199, 232), # wall + (152, 223, 138), # floor + (31, 119, 180), # cabinet + (255, 187, 120), # bed + (188, 189, 34), # chair + (140, 86, 75), # sofa + (255, 152, 150), # table + (214, 39, 40), # door + (197, 176, 213), # window + (148, 103, 189), # bookshelf + (196, 156, 148), # picture + (23, 190, 207), # counter + (178, 76, 76), + (247, 182, 210), # desk + (66, 188, 102), + (219, 219, 141), # curtain + (140, 57, 197), + (202, 185, 52), + (51, 176, 203), + (200, 54, 131), + (92, 193, 61), + (78, 
71, 183), + (172, 114, 82), + (255, 127, 14), # refrigerator + (91, 163, 138), + (153, 98, 156), + (140, 153, 101), + (158, 218, 229), # shower curtain + (100, 125, 154), + (178, 127, 135), + (120, 185, 128), + (146, 111, 194), + (44, 160, 44), # toilet + (112, 128, 144), # sink + (96, 207, 209), + (227, 119, 194), # bathtub + (213, 92, 176), + (94, 106, 211), + (82, 84, 163), # otherfurn + (100, 85, 144), + ] diff --git a/models/Mask3D/build/lib/mask3d/benchmark/util_3d.py b/models/Mask3D/build/lib/mask3d/benchmark/util_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..572064f3ca251563466ca6bfbe2c70dacdad205f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/benchmark/util_3d.py @@ -0,0 +1,177 @@ +import os, sys +import json + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) + +try: + from plyfile import PlyData, PlyElement +except: + print("Please install the module 'plyfile' for PLY i/o, e.g.") + print("pip install plyfile") + sys.exit(-1) + +import benchmark.util as util + + +# matrix: 4x4 np array +# points Nx3 np array +def transform_points(matrix, points): + assert len(points.shape) == 2 and points.shape[1] == 3 + num_points = points.shape[0] + p = np.concatenate([points, np.ones((num_points, 1))], axis=1) + p = np.matmul(matrix, np.transpose(p)) + p = np.transpose(p) + p[:, :3] /= p[:, 3, None] + return p[:, :3] + + +def export_ids(filename, ids): + with open(filename, "w") as f: + for id in ids: + f.write("%d\n" % id) + + +def load_ids(filename): + ids = open(filename).read().splitlines() + ids = np.array(ids, dtype=np.int64) + return ids + + +def read_mesh_vertices(filename): + assert os.path.isfile(filename) + with open(filename, "rb") as f: + plydata = PlyData.read(f) + num_verts = plydata["vertex"].count + vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) + vertices[:, 0] = plydata["vertex"].data["x"] + vertices[:, 1] = plydata["vertex"].data["y"] + vertices[:, 2] = plydata["vertex"].data["z"] + return vertices + + +# export 3d instance labels for instance evaluation +def export_instance_ids_for_eval(filename, label_ids, instance_ids): + assert label_ids.shape[0] == instance_ids.shape[0] + output_mask_path_relative = "pred_mask" + name = os.path.splitext(os.path.basename(filename))[0] + output_mask_path = os.path.join( + os.path.dirname(filename), output_mask_path_relative + ) + if not os.path.isdir(output_mask_path): + os.mkdir(output_mask_path) + insts = np.unique(instance_ids) + zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) + with open(filename, "w") as f: + for idx, inst_id in enumerate(insts): + if inst_id == 0: # 0 -> no instance for this vertex + continue + output_mask_file = os.path.join( + output_mask_path_relative, name + "_" + str(idx) + ".txt" + ) + loc = np.where(instance_ids == inst_id) + label_id = label_ids[loc[0][0]] + f.write("%s %d %f\n" % (output_mask_file, label_id, 1.0)) + # write mask + mask = np.copy(zero_mask) + mask[loc[0]] = 1 + export_ids(output_mask_file, mask) + + +# ------------ Instance Utils ------------ # + + +class Instance(object): + instance_id = 0 + label_id = 0 + vert_count = 0 + med_dist = -1 + dist_conf = 0.0 + + def __init__(self, mesh_vert_instances, instance_id): + if instance_id == -1: + return + self.instance_id = int(instance_id) + self.label_id = int(self.get_label_id(instance_id)) + self.vert_count = int( + self.get_instance_verts(mesh_vert_instances, instance_id) + ) + + def get_label_id(self, instance_id): + return 
int(instance_id // 1000) + + def get_instance_verts(self, mesh_vert_instances, instance_id): + return (mesh_vert_instances == instance_id).sum() + + def to_json(self): + return json.dumps( + self, default=lambda o: o.__dict__, sort_keys=True, indent=4 + ) + + def to_dict(self): + dict = {} + dict["instance_id"] = self.instance_id + dict["label_id"] = self.label_id + dict["vert_count"] = self.vert_count + dict["med_dist"] = self.med_dist + dict["dist_conf"] = self.dist_conf + return dict + + def from_json(self, data): + self.instance_id = int(data["instance_id"]) + self.label_id = int(data["label_id"]) + self.vert_count = int(data["vert_count"]) + if "med_dist" in data: + self.med_dist = float(data["med_dist"]) + self.dist_conf = float(data["dist_conf"]) + + def __str__(self): + return "(" + str(self.instance_id) + ")" + + +def read_instance_prediction_file(filename, pred_path): + lines = open(filename).read().splitlines() + instance_info = {} + abs_pred_path = os.path.abspath(pred_path) + for line in lines: + parts = line.split(" ") + if len(parts) != 3: + util.print_error( + "invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]" + ) + if os.path.isabs(parts[0]): + util.print_error( + "invalid instance prediction file. First entry in line must be a relative path" + ) + mask_file = os.path.join(os.path.dirname(filename), parts[0]) + mask_file = os.path.abspath(mask_file) + # check that mask_file lives inside prediction path + if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: + util.print_error( + "predicted mask {} in prediction text file {} points outside of prediction path.".format( + mask_file, filename + ) + ) + + info = {} + info["label_id"] = int(float(parts[1])) + info["conf"] = float(parts[2]) + instance_info[mask_file] = info + return instance_info + + +def get_instances(ids, class_ids, class_labels, id2label): + instances = {} + for label in class_labels: + instances[label] = [] + instance_ids = np.unique(ids) + for id in instance_ids: + if id == 0: + continue + inst = Instance(ids, id) + if inst.label_id in class_ids: + instances[id2label[inst.label_id]].append(inst.to_dict()) + return instances diff --git a/models/Mask3D/build/lib/mask3d/conf/__init__.py b/models/Mask3D/build/lib/mask3d/conf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/conf/augmentation/albumentations_aug.yaml b/models/Mask3D/build/lib/mask3d/conf/augmentation/albumentations_aug.yaml new file mode 100644 index 0000000000000000000000000000000000000000..006663b4be251bf0f41ac2f66f855ae3d59a2878 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/augmentation/albumentations_aug.yaml @@ -0,0 +1,30 @@ +__version__: 0.4.5 +transform: + __class_fullname__: albumentations.core.composition.Compose + additional_targets: {} + bbox_params: null + keypoint_params: null + p: 1.0 + transforms: + - __class_fullname__: albumentations.augmentations.transforms.RandomBrightnessContrast + always_apply: true + brightness_by_max: true + brightness_limit: + - -0.2 + - 0.2 + contrast_limit: + - -0.2 + - 0.2 + p: 0.5 + - __class_fullname__: albumentations.augmentations.transforms.RGBShift + always_apply: true + b_shift_limit: + - -20 + - 20 + g_shift_limit: + - -20 + - 20 + p: 0.5 + r_shift_limit: + - -20 + - 20 diff --git a/models/Mask3D/build/lib/mask3d/conf/augmentation/volumentations_aug.yaml 
b/models/Mask3D/build/lib/mask3d/conf/augmentation/volumentations_aug.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b86407a2e735ad8dbba79f83746ceb79722aedf --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/augmentation/volumentations_aug.yaml @@ -0,0 +1,53 @@ +# pi = 3.14159265358979 +# pi/2 = 1.57079632679489 +# pi/3 = 1.04719755119659 +# pi/6 = 0.52359877559829 +# pi/12 = 0.26179938779914 +# pi/24 = 0.13089969389957 +# +__version__: 0.1.6 +transform: + __class_fullname__: volumentations.core.composition.Compose + additional_targets: {} + p: 1.0 + transforms: + - __class_fullname__: volumentations.augmentations.transforms.Scale3d + always_apply: true + p: 0.5 + scale_limit: + - - -0.1 + - 0.1 + - - -0.1 + - 0.1 + - - -0.1 + - 0.1 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 0 + - 0 + - 1 + p: 0.5 + rotation_limit: + - -3.141592653589793 + - 3.141592653589793 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 0 + - 1 + - 0 + p: 0.5 + rotation_limit: + - -0.13089969389957 + - 0.13089969389957 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 1 + - 0 + - 0 + p: 0.5 + rotation_limit: + - -0.13089969389957 + - 0.13089969389957 diff --git a/models/Mask3D/build/lib/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml b/models/Mask3D/build/lib/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7f0958eed35ea4317ddc3f2378dd66336472c0fa --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml @@ -0,0 +1,11 @@ +# @package _group_ +- _target_: pytorch_lightning.callbacks.ModelCheckpoint + monitor: val_mean_ap_50 + save_last: true + save_top_k: 1 + mode: max + dirpath: ${general.save_dir} + filename: "{epoch}-{val_mean_ap_50:.3f}" + every_n_epochs: 1 + +- _target_: pytorch_lightning.callbacks.LearningRateMonitor diff --git a/models/Mask3D/build/lib/mask3d/conf/config_base_instance_segmentation.yaml b/models/Mask3D/build/lib/mask3d/conf/config_base_instance_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61aeae0519bd308a58293d07ee902beb6a64ed5d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/config_base_instance_segmentation.yaml @@ -0,0 +1,75 @@ +general: + train_mode: true + task: "instance_segmentation" + seed: null + checkpoint: null + backbone_checkpoint: null + freeze_backbone: false # train only last layer + linear_probing_backbone: false + train_on_segments: false + eval_on_segments: false + filter_out_instances: false + save_visualizations: false + visualization_point_size: 20 + decoder_id: -1 + export: false + use_dbscan: false + ignore_class_threshold: 100 + project_name: scannet + workspace: jonasschult + experiment_name: DEBUG_ABLATION + num_targets: 19 + add_instance: true + dbscan_eps: 0.95 + dbscan_min_points: 1 + + + export_threshold: 0.0001 + + reps_per_epoch: 1 + + on_crops: false + + scores_threshold: 0.0 + iou_threshold: 1.0 + + area: 5 + + eval_inner_core: -1 # disabled + + topk_per_image: 100 + + ignore_mask_idx: [] + + max_batch_size: 99999999 + + save_dir: saved/${general.experiment_name} + # time/commit/md5(config)_uuid + # time/experiment_id/version_uuid + # experiment_id: 1 # commit[:8], or unique from logger + # version: 1 # md5[:8] of config + + gpus: 1 + +defaults: + 
- data: indoor + - data/data_loaders: simple_loader + - data/datasets: scannet + - data/collation_functions: voxelize_collate + - logging: full + - model: mask3d + - metrics: miou + - optimizer: adamw + - scheduler: onecyclelr + - trainer: trainer600 + - callbacks: callbacks_instance_segmentation + - matcher: hungarian_matcher + - loss: set_criterion + +hydra: + run: + dir: saved/hydra_logs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: saved/hydra_logs/${now:%Y-%m-%d}/${now:%H-%M-%S} + # dir: ${general.save_dir} + subdir: ${hydra.job.num}_${hydra.job.id} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate.yaml b/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..026552efb024e4e6fd90bf6bda9df283da2bf4c1 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate.yaml @@ -0,0 +1,42 @@ +# @package data + +train_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.train_mode} + small_crops: false + very_small_crops: false + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.train_dataset.filter_out_classes} + label_offset: ${data.train_dataset.label_offset} + num_queries: ${model.num_queries} + +validation_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.validation_mode} + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.validation_dataset.filter_out_classes} + label_offset: ${data.validation_dataset.label_offset} + num_queries: ${model.num_queries} + +test_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.test_mode} + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.test_dataset.filter_out_classes} + label_offset: ${data.test_dataset.label_offset} + num_queries: ${model.num_queries} \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml b/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5d3471d143ddfe999d8f3031e41ba6efce2e879 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml @@ -0,0 +1,36 @@ +# @package data + +train_collation: + _target_: mask3d.datasets.utils.VoxelizeCollateMerge + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.train_mode} + small_crops: false + very_small_crops: false + scenes: 2 + batch_instance: false + make_one_pc_noise: false + place_nearby: false + place_far: false + proba: 1 + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} + +validation_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: 
${data.validation_mode} + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} + +test_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.test_mode} + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader.yaml b/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39996e14d769c2ba9341da582a1f7bf970fc7925 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader.yaml @@ -0,0 +1,22 @@ +# @package data + +train_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: true + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.batch_size} + +validation_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.test_batch_size} + +test_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.test_batch_size} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml b/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1b1b45d13167dc07357a13feb5a513dd71c9a2e --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml @@ -0,0 +1,22 @@ +# @package data + +train_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: true + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.batch_size} + +validation_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: 1 + batch_size: ${data.test_batch_size} + +test_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: 1 + batch_size: ${data.test_batch_size} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6728ab9eb26bc78f435237d9d7d61800b900735d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport.yaml @@ -0,0 +1,48 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/matterport + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + 
label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/matterport + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport_scannet.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport_scannet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df259ceaadfa68a90c2b8a60d7b74a958b30c79d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/matterport_scannet.yaml @@ -0,0 +1,50 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: + - data/processed/scannet + - data/processed/matterport + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/rio.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/rio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1adfea36fea05b14a7fa95382677aee6144d1b4b --- /dev/null +++ 
b/models/Mask3D/build/lib/mask3d/conf/data/datasets/rio.yaml @@ -0,0 +1,48 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/s3dis.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/s3dis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e1385416655514397d82737e1edc2d1a5997657 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/s3dis.yaml @@ -0,0 +1,87 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: data/processed/s3dis + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: False + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: 
data/processed/s3dis + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: data/processed/s3dis + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50f1c6c5998d8f3c6dae35ef508225dff4b0271f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet.yaml @@ -0,0 +1,79 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: false + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + add_unlabeled_pc: false + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + 
add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet200.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet200.yaml new file mode 100644 index 0000000000000000000000000000000000000000..730a6ab9f1965004ec9828d1e8b2429005bef6f2 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/scannet200.yaml @@ -0,0 +1,79 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: false + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + add_unlabeled_pc: false + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: null + volume_augmentations_path: null + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: null 
+ volume_augmentations_path: null + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/semantic_kitti.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/semantic_kitti.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9540ad610bd4a68d64369519d20e13009df9feda --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/semantic_kitti.yaml @@ -0,0 +1,42 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.train_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + +validation_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.validation_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: null + +test_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.test_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: null diff --git a/models/Mask3D/build/lib/mask3d/conf/data/datasets/stpls3d.yaml b/models/Mask3D/build/lib/mask3d/conf/data/datasets/stpls3d.yaml new file mode 100644 index 0000000000000000000000000000000000000000..913667d4123a7edead9d948358ae25cf9f7b4bb1 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/datasets/stpls3d.yaml @@ -0,0 +1,95 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + # different 
augs experiments + instance_oversampling: 0.0 + place_around_existing: False + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + reps_per_epoch: ${general.reps_per_epoch} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + label_offset: 1 + is_elastic_distortion: true + color_drop: 0.0 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + on_crops: ${general.on_crops} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + label_offset: 1 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + on_crops: ${general.on_crops} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + label_offset: 1 diff --git a/models/Mask3D/build/lib/mask3d/conf/data/indoor.yaml b/models/Mask3D/build/lib/mask3d/conf/data/indoor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..868c37ccfe901f14396b68a38eac47b42cb3e812 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/indoor.yaml @@ -0,0 +1,43 @@ +# @package _group_ + +# these parameters are inherited by datasets, data_loaders and collators +# but they might be overwritten + +# splits +train_mode: train +validation_mode: validation +test_mode: validation # test # validation + +# dataset +ignore_label: 255 +add_raw_coordinates: true # 3dim +add_colors: true # 3dim +add_normals: false # 3dim +in_channels: 3 # in_channels = 3 * (add_normals + add_colors + add_raw_coordinates) +num_labels: 20 +# num_labels: 41 +add_instance: ${general.add_instance} +task: ${general.task} + +# data loader +pin_memory: false +num_workers: 4 +batch_size: 5 +test_batch_size: 1 +cache_data: false + +# collation +voxel_size: 0.02 + +reps_per_epoch: ${general.reps_per_epoch} + +cropping: false +cropping_args: + min_points: 30000 + aspect: 0.8 + 
min_crop: 0.5 + max_crop: 1.0 + +crop_min_size: 20000 +crop_length: 6.0 +cropping_v1: true \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/conf/data/outdoor.yaml b/models/Mask3D/build/lib/mask3d/conf/data/outdoor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a77474f62d1cfb53f130160f641c65cb81a62956 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/data/outdoor.yaml @@ -0,0 +1,26 @@ +# @package _group_ + +# these parameters are inherited by datasets, data_loaders and collators +# but they might be overwritten + +# splits +train_mode: train +validation_mode: validation +test_mode: validation + +# dataset +ignore_label: 255 +add_distance: true # 1dim +add_reflection: true # 1dim +in_channels: 2 # in_channels = add_distance + add_reflection +num_labels: 19 +add_instance: false + +# data loader +pin_memory: true +num_workers: 4 +batch_size: 18 +sweep: 1 + +# collation +voxel_size: 0.15 diff --git a/models/Mask3D/build/lib/mask3d/conf/logging/base.yaml b/models/Mask3D/build/lib/mask3d/conf/logging/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3d700a101ddf3d1e2c1a3cdea08190afff762a5b --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/logging/base.yaml @@ -0,0 +1,10 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.NeptuneLogger + project_name: ${general.workspace}/${general.project_name} + experiment_name: ${general.experiment_name} + offline_mode: false + +- _target_: pytorch_lightning.loggers.CSVLogger + save_dir: ${general.save_dir} + name: ${general.experiment_id} + version: ${general.version} diff --git a/models/Mask3D/build/lib/mask3d/conf/logging/full.yaml b/models/Mask3D/build/lib/mask3d/conf/logging/full.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b434e94dc1f0889cf0829b5f89b8509717a3546c --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/logging/full.yaml @@ -0,0 +1,8 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.WandbLogger + project: ${general.project_name} + name: ${general.experiment_name} + save_dir: ${general.save_dir} + entity: "schult" + resume: "allow" + id: ${general.experiment_name} diff --git a/models/Mask3D/build/lib/mask3d/conf/logging/minimal.yaml b/models/Mask3D/build/lib/mask3d/conf/logging/minimal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1c46e26fefedcec50d4fdc9fc77c187d60cf7b9 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/logging/minimal.yaml @@ -0,0 +1,5 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.CSVLogger + save_dir: ${general.save_dir} + name: ${general.experiment_id} + version: ${general.version} diff --git a/models/Mask3D/build/lib/mask3d/conf/logging/offline.yaml b/models/Mask3D/build/lib/mask3d/conf/logging/offline.yaml new file mode 100644 index 0000000000000000000000000000000000000000..914ad19142ca22c3778be709208323908460ebac --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/logging/offline.yaml @@ -0,0 +1,10 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.TensorBoardLogger + name: ${general.experiment_id} + version: ${general.version} + save_dir: ${general.save_dir} + +- _target_: pytorch_lightning.loggers.CSVLogger + name: ${general.experiment_id} + version: ${general.version} + save_dir: ${general.save_dir} \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/conf/loss/cross_entropy.yaml b/models/Mask3D/build/lib/mask3d/conf/loss/cross_entropy.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..c000f40ad2ab40605c244e38243a6e0cc7933768 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/loss/cross_entropy.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.nn.CrossEntropyLoss +ignore_index: ${data.ignore_label} diff --git a/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion.yaml b/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c04ba49ce1823c2d6e923a03ae0514490d463e9 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion.yaml @@ -0,0 +1,11 @@ +# @package _group_ +_target_: mask3d.models.criterion.SetCriterion +num_classes: ${general.num_targets} +eos_coef: 0.1 +losses: + - "labels" + - "masks" +num_points: ${matcher.num_points} +oversample_ratio: 3.0 +importance_sample_ratio: 0.75 +class_weights: -1 diff --git a/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion_custom_weights_1.yaml b/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion_custom_weights_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d2c308e081c1ffa61beb13308b27e6ff753f0f4 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/loss/set_criterion_custom_weights_1.yaml @@ -0,0 +1,11 @@ +# @package _group_ +_target_: mask3d.models.criterion.SetCriterion +num_classes: ${general.num_targets} +eos_coef: 0.1 +losses: + - "labels" + - "masks" +num_points: ${matcher.num_points} +oversample_ratio: 3.0 +importance_sample_ratio: 0.75 +class_weights: [1.0,1.5,10.0,1.0,1.0,1.0,1.0,1.0,10.0,10.0,1.0,10.0,1.0,1.0] diff --git a/models/Mask3D/build/lib/mask3d/conf/matcher/hungarian_matcher.yaml b/models/Mask3D/build/lib/mask3d/conf/matcher/hungarian_matcher.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47750b20906b6b40a131b702ba360e36ee4c8380 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/matcher/hungarian_matcher.yaml @@ -0,0 +1,6 @@ +# @package _group_ +_target_: mask3d.models.matcher.HungarianMatcher +cost_class: 2. +cost_mask: 5. +cost_dice: 2. 
+num_points: -1 diff --git a/models/Mask3D/build/lib/mask3d/conf/metrics/miou.yaml b/models/Mask3D/build/lib/mask3d/conf/metrics/miou.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68d1b61181d9615d7d6d7638261d119a4fc47074 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/metrics/miou.yaml @@ -0,0 +1,4 @@ +# @package _group_ +_target_: mask3d.models.metrics.ConfusionMatrix +num_classes: ${data.num_labels} +ignore_label: ${data.ignore_label} diff --git a/models/Mask3D/build/lib/mask3d/conf/model/mask3d.yaml b/models/Mask3D/build/lib/mask3d/conf/model/mask3d.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95718d8710477650561e0ddd845688f50c868032 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/model/mask3d.yaml @@ -0,0 +1,47 @@ +# @package _group_ +_target_: mask3d.models.Mask3D + +# transformer parameters +hidden_dim: 128 +dim_feedforward: 1024 +num_queries: 100 +num_heads: 8 +num_decoders: 3 +dropout: 0.0 +pre_norm: false +use_level_embed: false +normalize_pos_enc: true +positional_encoding_type: "fourier" +gauss_scale: 1.0 +hlevels: [0,1,2,3] + +# queries +non_parametric_queries: true +random_query_both: false +random_normal: false +random_queries: false +use_np_features: false + +# sampling +sample_sizes: [200, 800, 3200, 12800, 51200] +max_sample_size: false # change false means sampling activated + +shared_decoder: true +num_classes: ${general.num_targets} +train_on_segments: ${general.train_on_segments} +scatter_type: "mean" + +voxel_size: ${data.voxel_size} + +config: + backbone: + _target_: mask3d.models.Res16UNet34C + config: + dialations: [ 1, 1, 1, 1 ] + conv1_kernel_size: 5 + bn_momentum: 0.02 + # depends on normals, color, raw_coordinates + # varies from 3 to 9 + in_channels: ${data.in_channels} + out_channels: ${data.num_labels} + out_fpn: true diff --git a/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw.yaml b/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b4020d1ddd1444c94ea5bfbe1281c485fca587e --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.optim.AdamW +lr: 0.0001 \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw_lower.yaml b/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw_lower.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e42f091a0d5dd03b66ab1dcec8b81d78a692af9 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/optimizer/adamw_lower.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.optim.AdamW +lr: 0.005 diff --git a/models/Mask3D/build/lib/mask3d/conf/scheduler/exponentiallr.yaml b/models/Mask3D/build/lib/mask3d/conf/scheduler/exponentiallr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc5224083670b286d75fda46304560dbcca3aecb --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/scheduler/exponentiallr.yaml @@ -0,0 +1,11 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.ExponentialLR + gamma: 0.99999 + last_epoch: -1 # ${trainer.max_epochs} + # need to set to number because of tensorboard logger + # steps_per_epoch: -1 + +pytorch_lightning_params: + interval: step diff --git a/models/Mask3D/build/lib/mask3d/conf/scheduler/lambdalr.yaml b/models/Mask3D/build/lib/mask3d/conf/scheduler/lambdalr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b63f6f4333e98931ce22f1a38829de0ef51a3719 
--- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/scheduler/lambdalr.yaml @@ -0,0 +1,8 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.StepLR + step_size: 99999 + +pytorch_lightning_params: + interval: epoch diff --git a/models/Mask3D/build/lib/mask3d/conf/scheduler/onecyclelr.yaml b/models/Mask3D/build/lib/mask3d/conf/scheduler/onecyclelr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c788877193d7366c21088cf9fefb77e4f62ef4d9 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/scheduler/onecyclelr.yaml @@ -0,0 +1,11 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.OneCycleLR + max_lr: ${optimizer.lr} + epochs: ${trainer.max_epochs} + # need to set to number because of tensorboard logger + steps_per_epoch: -1 + +pytorch_lightning_params: + interval: step diff --git a/models/Mask3D/build/lib/mask3d/conf/trainer/trainer.yaml b/models/Mask3D/build/lib/mask3d/conf/trainer/trainer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f436300f9ca6bbbe96ca6c1b4c7e8eeffe35fabd --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/trainer/trainer.yaml @@ -0,0 +1,7 @@ +# @package _group_ +deterministic: false +max_epochs: 1000 +min_epochs: 1 +resume_from_checkpoint: null +check_val_every_n_epoch: 50 +num_sanity_val_steps: -1 diff --git a/models/Mask3D/build/lib/mask3d/conf/trainer/trainer600.yaml b/models/Mask3D/build/lib/mask3d/conf/trainer/trainer600.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc9f00295aafe3431d1c0e7ca50dbc29559ea134 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/conf/trainer/trainer600.yaml @@ -0,0 +1,7 @@ +# @package _group_ +deterministic: false +max_epochs: 601 +min_epochs: 1 +resume_from_checkpoint: null +check_val_every_n_epoch: 50 +num_sanity_val_steps: 2 diff --git a/models/Mask3D/build/lib/mask3d/datasets/__init__.py b/models/Mask3D/build/lib/mask3d/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/datasets/outdoor_semseg.py b/models/Mask3D/build/lib/mask3d/datasets/outdoor_semseg.py new file mode 100644 index 0000000000000000000000000000000000000000..4592a6eda45c1a7626530eb19c42c267496749df --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/outdoor_semseg.py @@ -0,0 +1,206 @@ +import logging +from pathlib import Path +from typing import List, Optional, Union, Tuple +from random import random + +import numpy as np +import volumentations as V +import yaml +from torch.utils.data import Dataset + +logger = logging.getLogger(__name__) + + +class LidarDataset(Dataset): + def __init__( + self, + data_dir: Optional[ + Union[str, Tuple[str]] + ] = "data/processed/semantic_kitti", + label_db_filepath: Optional[ + str + ] = "./data/processed/semantic_kitti/label_database.yaml", + mode: Optional[str] = "train", + add_reflection: Optional[bool] = True, + add_distance: Optional[bool] = False, + add_instance: Optional[bool] = True, + num_labels: Optional[int] = -1, + data_percent: Optional[float] = 1.0, + ignore_label: Optional[Union[int, List[int]]] = 255, + volume_augmentations_path: Optional[str] = None, + sweep: Optional[int] = 1, + ): + self.mode = mode + self.data_dir = data_dir + if type(data_dir) == str: + self.data_dir = [self.data_dir] + self.ignore_label = ignore_label + self.add_instance = add_instance + self.add_distance = add_distance + self.add_reflection = add_reflection + + # loading database 
files + self._data = [] + for database_path in self.data_dir: + database_path = Path(database_path) + if not (database_path / f"{mode}_database.yaml").exists(): + print(f"generate {database_path}/{mode}_database.yaml first") + exit() + self._data.extend( + self._load_yaml(database_path / f"{mode}_database.yaml") + ) + + labels = self._load_yaml(Path(label_db_filepath)) + self._labels = self._select_correct_labels(labels, num_labels) + + # augmentations + self.volume_augmentations = V.NoOp() + if volume_augmentations_path is not None: + self.volume_augmentations = V.load( + volume_augmentations_path, data_format="yaml" + ) + + # reformulating in sweeps + data = [[]] + last_scene = self._data[0]["scene"] + for x in self._data: + if x["scene"] == last_scene: + data[-1].append(x) + else: + last_scene = x["scene"] + data.append([x]) + for i in range(len(data)): + data[i] = list(self.chunks(data[i], sweep)) + self._data = [val for sublist in data for val in sublist] + + if data_percent < 1.0: + self._data = self._data[: int(len(self._data) * data_percent)] + + @staticmethod + def chunks(lst, n): + """Yield successive n-sized chunks from lst.""" + for i in range(0, len(lst), n): + yield lst[i : i + n] + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx: int): + points = [] + for sweep in self.data[idx]: + points.append(np.load(sweep["filepath"])) + # rotate + points[-1][:, :3] = ( + points[-1][:, :3] @ np.array(sweep["pose"])[:3, :3] + ) + # translate + points[-1][:, :3] += np.array(sweep["pose"])[:3, 3] + points = np.vstack(points) + + coordinates, features, labels = ( + points[:, :3], + points[:, 3:-2], + points[:, -2:], + ) + + if not self.add_reflection: + features = np.ones(np.ones((len(coordinates), 1))) + + if self.add_distance: + center_coordinate = coordinates.mean(0) + features = np.hstack( + ( + features, + np.linalg.norm(coordinates - center_coordinate, axis=1)[ + :, np.newaxis + ], + ) + ) + + # volume and image augmentations for train + if "train" in self.mode: + coordinates -= coordinates.mean(0) + if 0.5 > random(): + coordinates += ( + np.random.uniform(coordinates.min(0), coordinates.max(0)) + / 2 + ) + aug = self.volume_augmentations( + points=coordinates, + features=features, + labels=labels, + ) + coordinates, features, labels = ( + aug["points"], + aug["features"], + aug["labels"], + ) + + # prepare labels and map from 0 to 20(40) + labels = labels.astype(np.int32) + if labels.size > 0: + labels[:, 0] = self._remap_from_zero(labels[:, 0]) + if not self.add_instance: + # taking only first column, which is segmentation label, not instance + labels = labels[:, 0].flatten() + + return coordinates, features, labels + + @property + def data(self): + """database file containing information about preproscessed dataset""" + return self._data + + @property + def label_info(self): + """database file containing information labels used by dataset""" + return self._labels + + @staticmethod + def _load_yaml(filepath): + with open(filepath) as f: + file = yaml.safe_load(f) + return file + + def _select_correct_labels(self, labels, num_labels): + number_of_validation_labels = 0 + number_of_all_labels = 0 + for ( + k, + v, + ) in labels.items(): + number_of_all_labels += 1 + if v["validation"]: + number_of_validation_labels += 1 + + if num_labels == number_of_all_labels: + return labels + elif num_labels == number_of_validation_labels: + valid_labels = dict() + for ( + k, + v, + ) in labels.items(): + if v["validation"]: + valid_labels.update({k: v}) + return 
valid_labels + else: + msg = f"""not available number labels, select from: + {number_of_validation_labels}, {number_of_all_labels}""" + raise ValueError(msg) + + def _remap_from_zero(self, labels): + labels[ + ~np.isin(labels, list(self.label_info.keys())) + ] = self.ignore_label + # remap to the range from 0 + for i, k in enumerate(self.label_info.keys()): + labels[labels == k] = i + return labels + + def _remap_model_output(self, output): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(self.label_info.keys()): + output_remapped[output == i] = k + return output_remapped diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/__init__.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..2f222dc27e73eedab1e1d82b14c1573ce632af7c --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py @@ -0,0 +1,116 @@ +import re +from pathlib import Path +import numpy as np +import pandas as pd +from fire import Fire +from natsort import natsorted +from loguru import logger +import os + +from datasets.preprocessing.base_preprocessing import BasePreprocessing +from utils.point_cloud_utils import load_ply_with_normals + +from datasets.scannet200.scannet200_constants import ( + VALID_CLASS_IDS_200, + SCANNET_COLOR_MAP_200, + CLASS_LABELS_200, +) + + +class ARKitScenesPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "/home/weders/scratch/scratch/scannetter/arkit/raw", + save_dir: str = "/home/weders/scratch/scratch/scannetter/arkit/raw", + modes: tuple = ('Validation', ), + n_jobs: int = 1, + git_repo: str = "./data/raw/scannet/ScanNet", + mesh_file: str="mesh_tsdf.ply", + scannet200: bool = False, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.scannet200 = scannet200 + git_repo = Path(git_repo) + for mode in self.modes: + scenes = os.listdir(os.path.join(data_dir, mode)) + scans_folder = "scans_test" if mode == "test" else "scans" + filepaths = [] + for scene in scenes: + if os.path.exists(os.path.join(data_dir, mode, scene, mesh_file)): + filepaths.append( + self.data_dir + / mode + / scene + / mesh_file) + self.files[mode] = natsorted(filepaths) + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. 
+ + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + scene = int(filepath.parent.name) + print(scene) + filebase = { + "filepath": filepath, + "scene": scene, + "sub_scene": scene, + "raw_filepath": str(filepath), + "file_len": -1, + } + # reading both files and checking that they are fitting + coords, features, _ = load_ply_with_normals(filepath) + file_len = len(coords) + filebase["file_len"] = file_len + points = np.hstack((coords, features)) + + print(features.shape) + + points = np.concatenate((points, np.zeros((file_len, 4))), axis=1) # adding segment and label fake columns + + processed_filepath = ( + self.save_dir / mode / f"data_mask3d.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + return filebase + + @logger.catch + def fix_bugs_in_labels(self): + if not self.scannet200: + logger.add(self.save_dir / "fixed_bugs_in_labels.log") + found_wrong_labels = { + tuple([270, 0]): 50, + tuple([270, 2]): 50, + tuple([384, 0]): 149, + } + for scene, wrong_label in found_wrong_labels.items(): + scene, sub_scene = scene + bug_file = ( + self.save_dir / "train" / f"{scene:04}_{sub_scene:02}.npy" + ) + points = np.load(bug_file) + bug_mask = points[:, -1] != wrong_label + points = points[bug_mask] + np.save(bug_file, points) + logger.info(f"Fixed {bug_file}") + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + print(scene_match) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(ARKitScenesPreprocessing) \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/base_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/base_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..a17fd4f89aca0d16d27b1bd10c9f40b3e40a6e61 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/base_preprocessing.py @@ -0,0 +1,204 @@ +import os +import sys +import re +import yaml +import json +import multiprocessing +from pathlib import Path +from hashlib import md5 + +import numpy as np +from fire import Fire +from tqdm import tqdm +from joblib import Parallel, delayed +from loguru import logger + + +class BasePreprocessing: + def __init__( + self, + data_dir: str = "./data/raw/", + save_dir: str = "./data/processed/", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + ): + self.data_dir = Path(data_dir) + self.save_dir = Path(save_dir) + self.n_jobs = n_jobs + self.modes = modes + + if not self.data_dir.exists(): + logger.error("data folder doesn't exist") + raise FileNotFoundError + if self.save_dir.exists() is False: + self.save_dir.mkdir(parents=True, exist_ok=True) + + self.files = {} + for data_type in self.modes: + self.files.update({data_type: []}) + + @logger.catch + def preprocess(self): + self.n_jobs = ( + multiprocessing.cpu_count() if self.n_jobs == -1 else self.n_jobs + ) + for mode in self.modes: + database = [] + logger.info(f"Tasks for {mode}: {len(self.files[mode])}") + parallel_results = Parallel(n_jobs=self.n_jobs, verbose=10)( + delayed(self.process_file)(file, mode) + for file in self.files[mode] + ) + for filebase in parallel_results: + database.append(filebase) + self.save_database(database, mode) + # 
self.fix_bugs_in_labels() + # self.joint_database() + # self.compute_color_mean_std( + # train_database_path=(self.save_dir / "train_database.yaml") + # ) + + def preprocess_sequential(self): + for mode in self.modes: + database = [] + for filepath in tqdm(self.files[mode], unit="file"): + filebase = self.process_file(filepath, mode) + database.append(filebase) + self.save_database(database, mode) + self.fix_bugs_in_labels() + self.joint_database() + self.compute_color_mean_std( + train_database_path=(self.save_dir / "train_database.yaml") + ) + + def process_file(self, filepath, mode): + """process_file. + + Args: + filepath: path to the main file + mode: typically train, test or validation + + Returns: + filebase: info about file + """ + raise NotImplementedError + + def make_instance_database_sequential( + self, + train_database_path: str = "./data/processed/train_database.yaml", + mode="instance", + ): + train_database = self._load_yaml(train_database_path) + instance_database = [] + for sample in tqdm(train_database): + instance_database.append(self.extract_instance_from_file(sample)) + self.save_database(instance_database, mode=mode) + + @logger.catch + def make_instance_database( + self, + train_database_path: str = "./data/processed/train_database.yaml", + mode="instance", + ): + self.n_jobs = ( + multiprocessing.cpu_count() if self.n_jobs == -1 else self.n_jobs + ) + train_database = self._load_yaml(train_database_path) + instance_database = [] + logger.info(f"Files in database: {len(train_database)}") + parallel_results = Parallel(n_jobs=self.n_jobs, verbose=10)( + delayed(self.extract_instance_from_file)(sample) + for sample in train_database + ) + for filebase in parallel_results: + instance_database.append(filebase) + self.save_database(instance_database, mode=mode) + + def extract_instance_from_file(self, sample_from_database): + points = np.load(sample_from_database["filepath"]) + labels = points[:, -2:] + file_instances = [] + for instance_id in np.unique(labels[:, 1]): + occupied_indices = np.isin(labels[:, 1], instance_id) + instance_points = points[occupied_indices].copy() + instance_classes = ( + np.unique(instance_points[:, 9]).astype(int).tolist() + ) + + hash_string = str(sample_from_database["filepath"]) + str( + instance_id + ) + hash_string = md5(hash_string.encode("utf-8")).hexdigest() + instance_filepath = ( + self.save_dir / "instances" / f"{hash_string}.npy" + ) + instance = { + "classes": instance_classes, + "instance_filepath": str(instance_filepath), + "instance_size": len(instance_points), + "original_file": str(sample_from_database["filepath"]), + } + if not instance_filepath.parent.exists(): + instance_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(instance_filepath, instance_points.astype(np.float32)) + file_instances.append(instance) + return file_instances + + def fix_bugs_in_labels(self): + pass + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/train_database.yaml", + ): + pass + + def save_database(self, database, mode): + for element in database: + self._dict_to_yaml(element) + self._save_yaml(self.save_dir / (mode + "_database.yaml"), database) + + def joint_database(self, train_modes=["train", "validation"]): + joint_db = [] + for mode in train_modes: + joint_db.extend( + self._load_yaml(self.save_dir / (mode + "_database.yaml")) + ) + self._save_yaml( + self.save_dir / "train_validation_database.yaml", joint_db + ) + + @classmethod + def _read_json(cls, path): + with open(path) as f: + file = 
json.load(f) + return file + + @classmethod + def _save_yaml(cls, path, file): + with open(path, "w") as f: + yaml.safe_dump( + file, f, default_style=None, default_flow_style=False + ) + + @classmethod + def _dict_to_yaml(cls, dictionary): + if not isinstance(dictionary, dict): + return + for k, v in dictionary.items(): + if isinstance(v, dict): + cls._dict_to_yaml(v) + if isinstance(v, np.ndarray): + dictionary[k] = v.tolist() + if isinstance(v, Path): + dictionary[k] = str(v) + + @classmethod + def _load_yaml(cls, filepath): + with open(filepath) as f: + file = yaml.safe_load(f) + return file + + +if __name__ == "__main__": + Fire(BasePreprocessing) diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/s3dis_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/s3dis_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7ff4967ca9dc22248c6863b41f7b652687ae98 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/s3dis_preprocessing.py @@ -0,0 +1,282 @@ +import os +import re + +import numpy as np +from fire import Fire +from loguru import logger +from natsort import natsorted + +from datasets.preprocessing.base_preprocessing import BasePreprocessing + + +class S3DISPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/s3dis", + save_dir: str = "./data/processed/s3dis", + modes: tuple = ( + "Area_1", + "Area_2", + "Area_3", + "Area_4", + "Area_5", + "Area_6", + ), + n_jobs: int = -1, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.class_map = { + "ceiling": 0, + "floor": 1, + "wall": 2, + "beam": 3, + "column": 4, + "window": 5, + "door": 6, + "table": 7, + "chair": 8, + "sofa": 9, + "bookcase": 10, + "board": 11, + "clutter": 12, + "stairs": 12, # stairs are also mapped to clutter + } + + self.color_map = [ + [0, 255, 0], # ceiling + [0, 0, 255], # floor + [0, 255, 255], # wall + [255, 255, 0], # beam + [255, 0, 255], # column + [100, 100, 255], # window + [200, 200, 100], # door + [170, 120, 200], # table + [255, 0, 0], # chair + [200, 100, 100], # sofa + [10, 200, 100], # bookcase + [200, 200, 200], # board + [50, 50, 50], + ] # clutter + + self.create_label_database() + + for mode in self.modes: + filepaths = [] + for scene_path in [ + f.path for f in os.scandir(self.data_dir / mode) if f.is_dir() + ]: + filepaths.append(scene_path) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self): + label_database = dict() + for class_name, class_id in self.class_map.items(): + label_database[class_id] = { + "color": self.color_map[class_id], + "name": class_name, + "validation": True, + } + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def _buf_count_newlines_gen(self, fname): + def _make_gen(reader): + while True: + b = reader(2**16) + if not b: + break + yield b + + with open(fname, "rb") as f: + count = sum(buf.count(b"\n") for buf in _make_gen(f.raw.read)) + return count + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. 
+ + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + filebase = { + "filepath": filepath, + "scene": filepath.split("/")[-1], + "area": mode, + "raw_filepath": str(filepath), + "file_len": -1, + } + + scene_name = filepath.split("/")[-1] + instance_counter = 0 + scene_points = [] + for instance in [ + f + for f in os.scandir( + self.data_dir / mode / scene_name / "Annotations" + ) + if f.name.endswith(".txt") + ]: + instance_class = self.class_map[instance.name.split("_")[0]] + instance_points = np.loadtxt(instance.path) + + instance_normals = np.ones((instance_points.shape[0], 3)) + instance_class = np.array(instance_class).repeat( + instance_points.shape[0] + )[..., None] + instance_id = np.array(instance_counter).repeat( + instance_points.shape[0] + )[..., None] + + instance_points = np.hstack( + ( + instance_points, + instance_normals, + instance_class, + instance_id, + ) + ) + + scene_points.append(instance_points) + instance_counter += 1 + + points = np.vstack(scene_points) + + pcd_size = self._buf_count_newlines_gen(f"{filepath}/{scene_name}.txt") + if points.shape[0] != pcd_size: + print(f"FILE SIZE DOES NOT MATCH FOR {filepath}/{scene_name}.txt") + print(f"({points.shape[0]} vs. {pcd_size})") + + filebase["raw_segmentation_filepath"] = "" + + # add segment id as additional feature (DUMMY) + points = np.hstack((points, np.ones(points.shape[0])[..., None])) + points[:, [9, 10, -1]] = points[ + :, [-1, 9, 10] + ] # move segments after RGB + + gt_data = (points[:, -2] + 1) * 1000 + points[:, -1] + 1 + + file_len = len(points) + filebase["file_len"] = file_len + + processed_filepath = self.save_dir / mode / f"{scene_name}.npy" + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + processed_gt_filepath = ( + self.save_dir / "instance_gt" / mode / f"{scene_name}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir(parents=True, exist_ok=True) + np.savetxt(processed_gt_filepath, gt_data.astype(np.int32), fmt="%d") + filebase["instance_gt_filepath"] = str(processed_gt_filepath) + + filebase["color_mean"] = [ + float((points[:, 3] / 255).mean()), + float((points[:, 4] / 255).mean()), + float((points[:, 5] / 255).mean()), + ] + filebase["color_std"] = [ + float(((points[:, 3] / 255) ** 2).mean()), + float(((points[:, 4] / 255) ** 2).mean()), + float(((points[:, 5] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std(self, train_database_path: str = ""): + area_database_paths = [ + f + for f in os.scandir(self.save_dir) + if f.name.startswith("Area_") and f.name.endswith(".yaml") + ] + + for database_path in area_database_paths: + database = self._load_yaml(database_path.path) + color_mean, color_std = [], [] + for sample in database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt( + np.array(color_std).mean(axis=0) - color_mean**2 + ) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml( + self.save_dir / f"{database_path.name}_color_mean_std.yaml", + feats_mean_std, + ) + + for database_path in area_database_paths: + all_mean, all_std = [], [] + for let_out_path in area_database_paths: + if 
database_path == let_out_path: + continue + + database = self._load_yaml(let_out_path.path) + for sample in database: + all_std.append(sample["color_std"]) + all_mean.append(sample["color_mean"]) + + all_color_mean = np.array(all_mean).mean(axis=0) + all_color_std = np.sqrt( + np.array(all_std).mean(axis=0) - all_color_mean**2 + ) + feats_mean_std = { + "mean": [float(each) for each in all_color_mean], + "std": [float(each) for each in all_color_std], + } + file_path = database_path.name.replace("_database.yaml", "") + self._save_yaml( + self.save_dir / f"{file_path}_color_mean_std.yaml", + feats_mean_std, + ) + + @logger.catch + def fix_bugs_in_labels(self): + pass + + def joint_database( + self, + train_modes=( + "Area_1", + "Area_2", + "Area_3", + "Area_4", + "Area_5", + "Area_6", + ), + ): + for mode in train_modes: + joint_db = [] + for let_out in train_modes: + if mode == let_out: + continue + joint_db.extend( + self._load_yaml( + self.save_dir / (let_out + "_database.yaml") + ) + ) + self._save_yaml( + self.save_dir / f"train_{mode}_database.yaml", joint_db + ) + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(S3DISPreprocessing) diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/scannet_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/scannet_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..5a981864612e04930b04c9c0df8aaa6e2d9249a3 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/scannet_preprocessing.py @@ -0,0 +1,296 @@ +import re +from pathlib import Path +import numpy as np +import pandas as pd +from fire import Fire +from natsort import natsorted +from loguru import logger + +from datasets.preprocessing.base_preprocessing import BasePreprocessing +from utils.point_cloud_utils import load_ply_with_normals + +from datasets.scannet200.scannet200_constants import ( + VALID_CLASS_IDS_200, + SCANNET_COLOR_MAP_200, + CLASS_LABELS_200, +) + + +class ScannetPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/scannet/scannet", + save_dir: str = "./data/processed/scannet", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + git_repo: str = "./data/raw/scannet/ScanNet", + scannet200: bool = False, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.scannet200 = scannet200 + + if self.scannet200: + self.labels_pd = pd.read_csv( + self.data_dir / "scannetv2-labels.combined.tsv", + sep="\t", + header=0, + ) + + git_repo = Path(git_repo) + self.create_label_database(git_repo) + for mode in self.modes: + trainval_split_dir = git_repo / "Tasks" / "Benchmark" + scannet_special_mode = "val" if mode == "validation" else mode + with open( + trainval_split_dir / (f"scannetv2_{scannet_special_mode}.txt") + ) as f: + # -1 because the last one is always empty + split_file = f.read().split("\n")[:-1] + + scans_folder = "scans_test" if mode == "test" else "scans" + filepaths = [] + for scene in split_file: + filepaths.append( + self.data_dir + / scans_folder + / scene + / (scene + "_vh_clean_2.ply") + ) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self, git_repo): + if self.scannet200: + label_database = {} + for row_id, class_id in enumerate(VALID_CLASS_IDS_200): + label_database[class_id] = { + "color": SCANNET_COLOR_MAP_200[class_id], + "name": 
CLASS_LABELS_200[row_id], + "validation": True, + } + self._save_yaml( + self.save_dir / "label_database.yaml", label_database + ) + return label_database + else: + if (self.save_dir / "label_database.yaml").exists(): + return self._load_yaml(self.save_dir / "label_database.yaml") + df = pd.read_csv( + self.data_dir / "scannetv2-labels.combined.tsv", sep="\t" + ) + df = ( + df[~df[["nyu40class", "nyu40id"]].duplicated()][ + ["nyu40class", "nyu40id"] + ] + .set_index("nyu40id") + .sort_index()[["nyu40class"]] + .rename(columns={"nyu40class": "name"}) + .replace(" ", "_", regex=True) + ) + df = pd.DataFrame([{"name": "empty"}]).append(df) + df["validation"] = False + + with open( + git_repo + / "Tasks" + / "Benchmark" + / "classes_SemVoxLabel-nyu40id.txt" + ) as f: + for_validation = f.read().split("\n") + for category in for_validation: + index = int(re.split(" +", category)[0]) + df.loc[index, "validation"] = True + + # doing this hack because otherwise I will have to install imageio + with open(git_repo / "BenchmarkScripts" / "util.py") as f: + util = f.read() + color_list = eval("[" + util.split("return [\n")[1]) + + df["color"] = color_list + + label_database = df.to_dict("index") + self._save_yaml( + self.save_dir / "label_database.yaml", label_database + ) + return label_database + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. + + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + scene, sub_scene = self._parse_scene_subscene(filepath.name) + filebase = { + "filepath": filepath, + "scene": scene, + "sub_scene": sub_scene, + "raw_filepath": str(filepath), + "file_len": -1, + } + # reading both files and checking that they are fitting + coords, features, _ = load_ply_with_normals(filepath) + file_len = len(coords) + filebase["file_len"] = file_len + points = np.hstack((coords, features)) + + if mode in ["train", "validation"]: + # getting scene information + description_filepath = Path( + filepath + ).parent / filepath.name.replace("_vh_clean_2.ply", ".txt") + with open(description_filepath) as f: + scene_type = f.read().split("\n")[:-1] + scene_type = scene_type[-1].split(" = ")[1] + filebase["scene_type"] = scene_type + filebase["raw_description_filepath"] = description_filepath + + # getting instance info + instance_info_filepath = next( + Path(filepath).parent.glob("*.aggregation.json") + ) + segment_indexes_filepath = next( + Path(filepath).parent.glob("*[0-9].segs.json") + ) + instance_db = self._read_json(instance_info_filepath) + segments = self._read_json(segment_indexes_filepath) + segments = np.array(segments["segIndices"]) + filebase["raw_instance_filepath"] = instance_info_filepath + filebase["raw_segmentation_filepath"] = segment_indexes_filepath + + # add segment id as additional feature + segment_ids = np.unique(segments, return_inverse=True)[1] + points = np.hstack((points, segment_ids[..., None])) + + # reading labels file + label_filepath = filepath.parent / filepath.name.replace( + ".ply", ".labels.ply" + ) + filebase["raw_label_filepath"] = label_filepath + label_coords, label_colors, labels = load_ply_with_normals( + label_filepath + ) + if not np.allclose(coords, label_coords): + raise ValueError("files doesn't have same coordinates") + + # adding instance label + labels = labels[:, np.newaxis] + empty_instance_label = np.full(labels.shape, -1) + labels = np.hstack((labels, empty_instance_label)) + 
for instance in instance_db["segGroups"]: + segments_occupied = np.array(instance["segments"]) + occupied_indices = np.isin(segments, segments_occupied) + labels[occupied_indices, 1] = instance["id"] + + if self.scannet200: + label200 = instance["label"] + # Map the category name to id + label_ids = self.labels_pd[ + self.labels_pd["raw_category"] == label200 + ]["id"] + label_id = ( + int(label_ids.iloc[0]) if len(label_ids) > 0 else 0 + ) + labels[occupied_indices, 0] = label_id + points = np.hstack((points, labels)) + + # gt_data = (points[:, -2] + 1) * 1000 + points[:, -1] + 1 + gt_data = points[:, -2] * 1000 + points[:, -1] + 1 + else: + segments_test = "../../data/raw/scannet_test_segments" + segment_indexes_filepath = filepath.name.replace( + ".ply", ".0.010000.segs.json" + ) + segments = self._read_json( + f"{segments_test}/{segment_indexes_filepath}" + ) + segments = np.array(segments["segIndices"]) + # add segment id as additional feature + segment_ids = np.unique(segments, return_inverse=True)[1] + points = np.hstack((points, segment_ids[..., None])) + + processed_filepath = ( + self.save_dir / mode / f"{scene:04}_{sub_scene:02}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + if mode == "test": + return filebase + + processed_gt_filepath = ( + self.save_dir + / "instance_gt" + / mode + / f"scene{scene:04}_{sub_scene:02}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir(parents=True, exist_ok=True) + np.savetxt(processed_gt_filepath, gt_data.astype(np.int32), fmt="%d") + filebase["instance_gt_filepath"] = str(processed_gt_filepath) + + filebase["color_mean"] = [ + float((features[:, 0] / 255).mean()), + float((features[:, 1] / 255).mean()), + float((features[:, 2] / 255).mean()), + ] + filebase["color_std"] = [ + float(((features[:, 0] / 255) ** 2).mean()), + float(((features[:, 1] / 255) ** 2).mean()), + float(((features[:, 2] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/scannet/train_database.yaml", + ): + train_database = self._load_yaml(train_database_path) + color_mean, color_std = [], [] + for sample in train_database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt(np.array(color_std).mean(axis=0) - color_mean**2) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml(self.save_dir / "color_mean_std.yaml", feats_mean_std) + + @logger.catch + def fix_bugs_in_labels(self): + if not self.scannet200: + logger.add(self.save_dir / "fixed_bugs_in_labels.log") + found_wrong_labels = { + tuple([270, 0]): 50, + tuple([270, 2]): 50, + tuple([384, 0]): 149, + } + for scene, wrong_label in found_wrong_labels.items(): + scene, sub_scene = scene + bug_file = ( + self.save_dir / "train" / f"{scene:04}_{sub_scene:02}.npy" + ) + points = np.load(bug_file) + bug_mask = points[:, -1] != wrong_label + points = points[bug_mask] + np.save(bug_file, points) + logger.info(f"Fixed {bug_file}") + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + 
Fire(ScannetPreprocessing) diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..d483e535435cca026588c3177cfe368fad99596b --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py @@ -0,0 +1,181 @@ +import re +from pathlib import Path +from hashlib import md5 +from natsort import natsorted + +import numpy as np +from fire import Fire + +from base_preprocessing import BasePreprocessing + + +class SemanticKittiPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/semantic_kitti", + save_dir: str = "./data/processed/semantic_kitti", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + git_repo: str = "./data/raw/semantic-kitti-api", + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + git_repo = Path(git_repo) + self.create_label_database(git_repo / "config" / "semantic-kitti.yaml") + self.config = self._load_yaml( + git_repo / "config" / "semantic-kitti.yaml" + ) + self.pose = dict() + + for mode in self.modes: + scene_mode = "valid" if mode == "validation" else mode + self.pose[mode] = dict() + for scene in sorted(self.config["split"][scene_mode]): + filepaths = list( + self.data_dir.glob(f"*/{scene:02}/velodyne/*bin") + ) + filepaths = [str(file) for file in filepaths] + self.files[mode].extend(natsorted(filepaths)) + calibration = parse_calibration( + Path(filepaths[0]).parent.parent / "calib.txt" + ) + self.pose[mode].update( + { + scene: parse_poses( + Path(filepaths[0]).parent.parent / "poses.txt", + calibration, + ), + } + ) + + def create_label_database(self, config_file): + if (self.save_dir / "label_database.yaml").exists(): + return self._load_yaml(self.save_dir / "label_database.yaml") + config = self._load_yaml(config_file) + label_database = {} + for key, old_key in config["learning_map_inv"].items(): + label_database.update( + { + key: { + "name": config["labels"][old_key], + # bgr -> rgb + "color": config["color_map"][old_key][::-1], + "validation": not config["learning_ignore"][key], + } + } + ) + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def process_file(self, filepath, mode): + """process_file. 
+ + Args: + filepath: path to the main ply file + mode: train, test + + Returns: + filebase: info about file + """ + scene, sub_scene = re.search(r"(\d{2}).*(\d{6})", filepath).group(1, 2) + filebase = { + "filepath": filepath, + "scene": int(scene), + "sub_scene": int(sub_scene), + "file_len": -1, + "pose": self.pose[mode][int(scene)][int(sub_scene)].tolist(), + } + + points = np.fromfile(filepath, dtype=np.float32).reshape(-1, 4) + file_len = len(points) + filebase["file_len"] = file_len + + if mode in ["train", "validation"]: + # getting label info + label_filepath = filepath.replace("velodyne", "labels").replace( + "bin", "label" + ) + filebase["label_filepath"] = label_filepath + label = np.fromfile(label_filepath, dtype=np.uint32).astype( + np.int32 + ) + if not points.shape[0] == label.shape[0]: + raise ValueError("Files do not have same length") + semantic_label = label & 0xFFFF + instance_label = label >> 16 + + semantic_label_copy = semantic_label.copy() + for label in np.unique(semantic_label): + semantic_label[semantic_label_copy == label] = self.config[ + "learning_map" + ][label] + + label = np.hstack( + (semantic_label[:, np.newaxis], instance_label[:, np.newaxis]) + ) + points = np.hstack((points, label)) + + processed_filepath = self.save_dir / mode / f"{scene}_{sub_scene}.npy" + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + return filebase + + +def parse_calibration(filename): + """read calibration file with given filename + Returns + ------- + dict + Calibration matrices as 4x4 numpy arrays. + """ + calib = {} + + with open(filename) as calib_file: + for line in calib_file: + key, content = line.strip().split(":") + values = [float(v) for v in content.strip().split()] + + pose = np.zeros((4, 4)) + pose[0, 0:4] = values[0:4] + pose[1, 0:4] = values[4:8] + pose[2, 0:4] = values[8:12] + pose[3, 3] = 1.0 + + calib[key] = pose + return calib + + +def parse_poses(filename, calibration): + """read poses file with per-scan poses from given filename + Returns + ------- + list + list of poses as 4x4 numpy arrays. 
+ """ + + poses = [] + + Tr = calibration["Tr"] + Tr_inv = np.linalg.inv(Tr) + + with open(filename) as file: + for line in file: + values = [float(v) for v in line.strip().split()] + + pose = np.zeros((4, 4)) + pose[0, 0:4] = values[0:4] + pose[1, 0:4] = values[4:8] + pose[2, 0:4] = values[8:12] + pose[3, 3] = 1.0 + + poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr))) + + return poses + + +if __name__ == "__main__": + Fire(SemanticKittiPreprocessing) diff --git a/models/Mask3D/build/lib/mask3d/datasets/preprocessing/stpls3d_preprocessing.py b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/stpls3d_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..63ed5bff5d52e656f4bad2f853e5973b433871bd --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/preprocessing/stpls3d_preprocessing.py @@ -0,0 +1,291 @@ +import re +import os +import numpy as np +from fire import Fire +from natsort import natsorted +from loguru import logger +import pandas as pd + +from datasets.preprocessing.base_preprocessing import BasePreprocessing + + +class STPLS3DPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "../../data/raw/stpls3d", + save_dir: str = "../../data/processed/stpls3d", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + # https://github.com/meidachen/STPLS3D/blob/main/HAIS/STPLS3DInstanceSegmentationChallenge_Codalab_Evaluate.py#L31 + CLASS_LABELS = [ + "Build", + "LowVeg", + "MediumVeg", + "HighVeg", + "Vehicle", + "Truck", + "Aircraft", + "MilitaryVeh", + "Bike", + "Motorcycle", + "LightPole", + "StreetSign", + "Clutter", + "Fence", + ] + VALID_CLASS_IDS = np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + ) + + self.class_map = { + "Ground": 0, + "Build": 1, + "LowVeg": 2, + "MediumVeg": 3, + "HighVeg": 4, + "Vehicle": 5, + "Truck": 6, + "Aircraft": 7, + "MilitaryVeh": 8, + "Bike": 9, + "Motorcycle": 10, + "LightPole": 11, + "StreetSign": 12, + "Clutter": 13, + "Fence": 14, + } + + self.color_map = [ + [0, 255, 0], # Ground + [0, 0, 255], # Build + [0, 255, 255], # LowVeg + [255, 255, 0], # MediumVeg + [255, 0, 255], # HiVeg + [100, 100, 255], # Vehicle + [200, 200, 100], # Truck + [170, 120, 200], # Aircraft + [255, 0, 0], # MilitaryVec + [200, 100, 100], # Bike + [10, 200, 100], # Motorcycle + [200, 200, 200], # LightPole + [50, 50, 50], # StreetSign + [60, 130, 60], # Clutter + [130, 30, 60], + ] # Fence + + self.create_label_database() + + for mode in self.modes: + filepaths = [] + for scene_path in [ + f.path for f in os.scandir(self.data_dir / mode) + ]: + filepaths.append(scene_path) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self): + label_database = dict() + for class_name, class_id in self.class_map.items(): + label_database[class_id] = { + "color": self.color_map[class_id], + "name": class_name, + "validation": True, + } + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. 
+ + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + filebase = { + "filepath": filepath, + "scene": filepath.split("/")[-1], + "raw_filepath": str(filepath), + "file_len": -1, + } + + points = pd.read_csv(filepath, header=None).values + + filebase["raw_segmentation_filepath"] = "" + + # add segment id as additional feature (DUMMY) + if mode in ["train", "validation"]: + points = np.hstack( + ( + points, + np.ones(points.shape[0])[..., None], # normal 1 + np.ones(points.shape[0])[..., None], # normal 2 + np.ones(points.shape[0])[..., None], # normal 3 + np.ones(points.shape[0])[..., None], + ) + ) # segments + else: + # we need to add dummies for semantics and instances + points = np.hstack( + ( + points, + np.ones(points.shape[0])[..., None], # semantic class + np.ones(points.shape[0])[..., None], # instance id + np.ones(points.shape[0])[..., None], # normal 1 + np.ones(points.shape[0])[..., None], # normal 2 + np.ones(points.shape[0])[..., None], # normal 3 + np.ones(points.shape[0])[..., None], + ) + ) # segments + + points = points[ + :, [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 6, 7] + ] # move segments after RGB + + # move point clouds to be in positive range (important for split pointcloud function) + points[:, :3] = points[:, :3] - points[:, :3].min(0) + + points = points.astype(np.float32) + + if mode == "test": + points = points[:, :-2] + else: + points[ + points[:, -1] == -100.0, -1 + ] = -1 # -1 indicates "no instance" + + file_len = len(points) + filebase["file_len"] = file_len + + processed_filepath = ( + self.save_dir + / mode + / f"{filebase['scene'].replace('.txt', '')}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + if mode in ["validation", "test"]: + blocks = self.splitPointCloud(points) + + filebase["instance_gt_filepath"] = [] + filebase["filepath_crop"] = [] + for block_id, block in enumerate(blocks): + if len(block) > 10000: + if mode == "validation": + new_instance_ids = np.unique( + block[:, -1], return_inverse=True + )[1] + + assert new_instance_ids.shape[0] == block.shape[0] + # == 0 means -1 == no instance + # new_instance_ids[new_instance_ids == 0] + assert ( + new_instance_ids.max() < 1000 + ), "we cannot encode when there are more than 999 instances in a block" + + gt_data = (block[:, -2]) * 1000 + new_instance_ids + + processed_gt_filepath = ( + self.save_dir + / "instance_gt" + / mode + / f"{filebase['scene'].replace('.txt', '')}_{block_id}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir( + parents=True, exist_ok=True + ) + np.savetxt( + processed_gt_filepath, + gt_data.astype(np.int32), + fmt="%d", + ) + filebase["instance_gt_filepath"].append( + str(processed_gt_filepath) + ) + + processed_filepath = ( + self.save_dir + / mode + / f"{filebase['scene'].replace('.txt', '')}_{block_id}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir( + parents=True, exist_ok=True + ) + np.save(processed_filepath, block.astype(np.float32)) + filebase["filepath_crop"].append(str(processed_filepath)) + else: + print("block was smaller than 1000 points") + assert False + + filebase["color_mean"] = [ + float((points[:, 3] / 255).mean()), + float((points[:, 4] / 255).mean()), + float((points[:, 5] / 255).mean()), + ] + filebase["color_std"] = [ + 
float(((points[:, 3] / 255) ** 2).mean()), + float(((points[:, 4] / 255) ** 2).mean()), + float(((points[:, 5] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/stpls3d/train_database.yaml", + ): + train_database = self._load_yaml(train_database_path) + color_mean, color_std = [], [] + for sample in train_database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt(np.array(color_std).mean(axis=0) - color_mean**2) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml(self.save_dir / "color_mean_std.yaml", feats_mean_std) + + def splitPointCloud(self, cloud, size=50.0, stride=50): + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - size) / stride)) + 1 + depth = int(np.ceil((limitMax[1] - size) / stride)) + 1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks = [] + for (x, y) in cells: + xcond = (cloud[:, 0] <= x + size) & (cloud[:, 0] >= x) + ycond = (cloud[:, 1] <= y + size) & (cloud[:, 1] >= y) + cond = xcond & ycond + block = cloud[cond, :] + blocks.append(block) + return blocks + + @logger.catch + def fix_bugs_in_labels(self): + pass + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(STPLS3DPreprocessing) diff --git a/models/Mask3D/build/lib/mask3d/datasets/random_cuboid.py b/models/Mask3D/build/lib/mask3d/datasets/random_cuboid.py new file mode 100644 index 0000000000000000000000000000000000000000..334b87ecadbd9cbee2979d462532fb4a479b280f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/random_cuboid.py @@ -0,0 +1,96 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import torch + + +def check_aspect(crop_range, aspect_min): + xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2]) + xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]]) + yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:]) + return ( + (xy_aspect >= aspect_min) + or (xz_aspect >= aspect_min) + or (yz_aspect >= aspect_min) + ) + + +class RandomCuboid(object): + """ + RandomCuboid augmentation from DepthContrast [https://arxiv.org/abs/2101.02691] + We slightly modify this operation to account for object detection. + This augmentation randomly crops a cuboid from the input and + ensures that the cropped cuboid contains at least one bounding box + """ + + def __init__( + self, + min_points, + # aspect=0.8, + crop_length=6.0, + version1=True, + ): + # self.aspect = aspect + self.crop_length = crop_length + self.min_points = min_points + self.version1 = version1 + + def __call__(self, point_cloud): + if point_cloud.shape[0] < self.min_points: + print("too small pcd") + return np.ones(point_cloud.shape[0], dtype=np.bool) + + range_xyz = np.max(point_cloud[:, :2], axis=0) - np.min( + point_cloud[:, :2], axis=0 + ) + + for _ in range(100): + # crop_range = self.min_crop + np.random.rand(3) * ( + # self.max_crop - self.min_crop + # ) + # crop_range[-1] = 999. 
+ # if not check_aspect(crop_range, self.aspect): + # continue + + sample_center = point_cloud[:, :2].min(axis=0) + range_xyz / 2 + + if self.version1: + offset_x = np.random.uniform( + -range_xyz[0] / 4, range_xyz[0] / 4 + ) + offset_y = np.random.uniform( + -range_xyz[1] / 4, range_xyz[1] / 4 + ) + else: + offset_x = np.random.uniform( + -(range_xyz[0] / 2) + self.crop_length / 4, + +(range_xyz[0] / 2) - self.crop_length / 4, + ) + offset_y = np.random.uniform( + -(range_xyz[1] / 2) + self.crop_length / 4, + +(range_xyz[1] / 2) - self.crop_length / 4, + ) + + sample_center[0] = sample_center[0] + offset_x + sample_center[1] = sample_center[1] + offset_y + + min_xy = sample_center - self.crop_length / 2 + max_xy = sample_center + self.crop_length / 2 + + upper_idx = ( + np.sum((point_cloud[:, :2] <= max_xy).astype(np.int32), 1) == 2 + ) + lower_idx = ( + np.sum((point_cloud[:, :2] >= min_xy).astype(np.int32), 1) == 2 + ) + + new_pointidx = (upper_idx) & (lower_idx) + + if np.sum(new_pointidx) < self.min_points: + print("TOO SMALL") + continue + + return new_pointidx + + # fallback + print("FALLBACK") + return np.ones(point_cloud.shape[0], dtype=np.bool) diff --git a/models/Mask3D/build/lib/mask3d/datasets/scannet200/__init__.py b/models/Mask3D/build/lib/mask3d/datasets/scannet200/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_constants.py b/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..1d921407068335b82ad10af912d7e9d715dbd6ca --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_constants.py @@ -0,0 +1,704 @@ +### ScanNet Benchmark constants ### +VALID_CLASS_IDS_20 = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 14, + 16, + 24, + 28, + 33, + 34, + 36, + 39, +) + +CLASS_LABELS_20 = ( + "wall", + "floor", + "cabinet", + "bed", + "chair", + "sofa", + "table", + "door", + "window", + "bookshelf", + "picture", + "counter", + "desk", + "curtain", + "refrigerator", + "shower curtain", + "toilet", + "sink", + "bathtub", + "otherfurniture", +) + +SCANNET_COLOR_MAP_20 = { + 0: (0.0, 0.0, 0.0), + 1: (174.0, 199.0, 232.0), + 2: (152.0, 223.0, 138.0), + 3: (31.0, 119.0, 180.0), + 4: (255.0, 187.0, 120.0), + 5: (188.0, 189.0, 34.0), + 6: (140.0, 86.0, 75.0), + 7: (255.0, 152.0, 150.0), + 8: (214.0, 39.0, 40.0), + 9: (197.0, 176.0, 213.0), + 10: (148.0, 103.0, 189.0), + 11: (196.0, 156.0, 148.0), + 12: (23.0, 190.0, 207.0), + 14: (247.0, 182.0, 210.0), + 15: (66.0, 188.0, 102.0), + 16: (219.0, 219.0, 141.0), + 17: (140.0, 57.0, 197.0), + 18: (202.0, 185.0, 52.0), + 19: (51.0, 176.0, 203.0), + 20: (200.0, 54.0, 131.0), + 21: (92.0, 193.0, 61.0), + 22: (78.0, 71.0, 183.0), + 23: (172.0, 114.0, 82.0), + 24: (255.0, 127.0, 14.0), + 25: (91.0, 163.0, 138.0), + 26: (153.0, 98.0, 156.0), + 27: (140.0, 153.0, 101.0), + 28: (158.0, 218.0, 229.0), + 29: (100.0, 125.0, 154.0), + 30: (178.0, 127.0, 135.0), + 32: (146.0, 111.0, 194.0), + 33: (44.0, 160.0, 44.0), + 34: (112.0, 128.0, 144.0), + 35: (96.0, 207.0, 209.0), + 36: (227.0, 119.0, 194.0), + 37: (213.0, 92.0, 176.0), + 38: (94.0, 106.0, 211.0), + 39: (82.0, 84.0, 163.0), + 40: (100.0, 85.0, 144.0), +} + +### ScanNet200 Benchmark constants ### +VALID_CLASS_IDS_200 = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 
21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, + 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 121, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, + 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 221, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 286, + 300, + 304, + 312, + 323, + 325, + 331, + 342, + 356, + 370, + 392, + 395, + 399, + 408, + 417, + 488, + 540, + 562, + 570, + 572, + 581, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1174, + 1175, + 1176, + 1178, + 1179, + 1180, + 1181, + 1182, + 1183, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1190, + 1191, +) + +CLASS_LABELS_200 = ( + "wall", + "chair", + "floor", + "table", + "door", + "couch", + "cabinet", + "shelf", + "desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "bicycle", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "candle", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "guitar case", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", 
+ "shower floor", + "water pitcher", + "mailbox", + "bowl", + "paper bag", + "alarm clock", + "music stand", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "cd case", + "closet rod", + "coffee kettle", + "structure", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "storage organizer", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "luggage", + "mattress", +) + +SCANNET_COLOR_MAP_200 = { + 0: (0.0, 0.0, 0.0), + 1: (174.0, 199.0, 232.0), + 2: (188.0, 189.0, 34.0), + 3: (152.0, 223.0, 138.0), + 4: (255.0, 152.0, 150.0), + 5: (214.0, 39.0, 40.0), + 6: (91.0, 135.0, 229.0), + 7: (31.0, 119.0, 180.0), + 8: (229.0, 91.0, 104.0), + 9: (247.0, 182.0, 210.0), + 10: (91.0, 229.0, 110.0), + 11: (255.0, 187.0, 120.0), + 13: (141.0, 91.0, 229.0), + 14: (112.0, 128.0, 144.0), + 15: (196.0, 156.0, 148.0), + 16: (197.0, 176.0, 213.0), + 17: (44.0, 160.0, 44.0), + 18: (148.0, 103.0, 189.0), + 19: (229.0, 91.0, 223.0), + 21: (219.0, 219.0, 141.0), + 22: (192.0, 229.0, 91.0), + 23: (88.0, 218.0, 137.0), + 24: (58.0, 98.0, 137.0), + 26: (177.0, 82.0, 239.0), + 27: (255.0, 127.0, 14.0), + 28: (237.0, 204.0, 37.0), + 29: (41.0, 206.0, 32.0), + 31: (62.0, 143.0, 148.0), + 32: (34.0, 14.0, 130.0), + 33: (143.0, 45.0, 115.0), + 34: (137.0, 63.0, 14.0), + 35: (23.0, 190.0, 207.0), + 36: (16.0, 212.0, 139.0), + 38: (90.0, 119.0, 201.0), + 39: (125.0, 30.0, 141.0), + 40: (150.0, 53.0, 56.0), + 41: (186.0, 197.0, 62.0), + 42: (227.0, 119.0, 194.0), + 44: (38.0, 100.0, 128.0), + 45: (120.0, 31.0, 243.0), + 46: (154.0, 59.0, 103.0), + 47: (169.0, 137.0, 78.0), + 48: (143.0, 245.0, 111.0), + 49: (37.0, 230.0, 205.0), + 50: (14.0, 16.0, 155.0), + 51: (196.0, 51.0, 182.0), + 52: (237.0, 80.0, 38.0), + 54: (138.0, 175.0, 62.0), + 55: (158.0, 218.0, 229.0), + 56: (38.0, 96.0, 167.0), + 57: (190.0, 77.0, 246.0), + 58: (208.0, 49.0, 84.0), + 59: (208.0, 193.0, 72.0), + 62: (55.0, 220.0, 57.0), + 63: (10.0, 125.0, 140.0), + 64: (76.0, 38.0, 202.0), + 65: (191.0, 28.0, 135.0), + 66: (211.0, 120.0, 42.0), + 67: (118.0, 174.0, 76.0), + 68: (17.0, 242.0, 171.0), + 69: (20.0, 65.0, 247.0), + 70: (208.0, 61.0, 222.0), + 71: (162.0, 62.0, 60.0), + 72: (210.0, 235.0, 62.0), + 73: (45.0, 152.0, 72.0), + 74: (35.0, 107.0, 149.0), + 75: (160.0, 89.0, 237.0), + 76: (227.0, 56.0, 125.0), + 77: (169.0, 143.0, 81.0), + 78: (42.0, 143.0, 20.0), + 79: (25.0, 160.0, 151.0), + 80: (82.0, 75.0, 227.0), + 82: (253.0, 59.0, 222.0), + 84: (240.0, 130.0, 89.0), + 86: (123.0, 172.0, 47.0), + 87: (71.0, 194.0, 133.0), + 88: (24.0, 94.0, 205.0), + 89: (134.0, 16.0, 179.0), + 90: (159.0, 32.0, 52.0), + 93: (213.0, 208.0, 88.0), + 95: (64.0, 158.0, 70.0), + 96: (18.0, 163.0, 194.0), + 97: (65.0, 29.0, 153.0), + 98: (177.0, 10.0, 109.0), + 99: (152.0, 83.0, 7.0), + 100: (83.0, 175.0, 30.0), + 101: (18.0, 199.0, 153.0), + 102: (61.0, 81.0, 208.0), + 103: (213.0, 85.0, 216.0), + 104: (170.0, 53.0, 42.0), + 105: (161.0, 192.0, 38.0), + 106: (23.0, 241.0, 91.0), + 107: (12.0, 103.0, 170.0), + 110: (151.0, 41.0, 245.0), + 112: (133.0, 51.0, 80.0), + 115: (184.0, 162.0, 91.0), + 116: (50.0, 138.0, 38.0), + 118: (31.0, 237.0, 236.0), + 120: (39.0, 19.0, 208.0), + 121: (223.0, 27.0, 180.0), + 122: (254.0, 141.0, 85.0), + 125: (97.0, 144.0, 39.0), + 128: (106.0, 
231.0, 176.0), + 130: (12.0, 61.0, 162.0), + 131: (124.0, 66.0, 140.0), + 132: (137.0, 66.0, 73.0), + 134: (250.0, 253.0, 26.0), + 136: (55.0, 191.0, 73.0), + 138: (60.0, 126.0, 146.0), + 139: (153.0, 108.0, 234.0), + 140: (184.0, 58.0, 125.0), + 141: (135.0, 84.0, 14.0), + 145: (139.0, 248.0, 91.0), + 148: (53.0, 200.0, 172.0), + 154: (63.0, 69.0, 134.0), + 155: (190.0, 75.0, 186.0), + 156: (127.0, 63.0, 52.0), + 157: (141.0, 182.0, 25.0), + 159: (56.0, 144.0, 89.0), + 161: (64.0, 160.0, 250.0), + 163: (182.0, 86.0, 245.0), + 165: (139.0, 18.0, 53.0), + 166: (134.0, 120.0, 54.0), + 168: (49.0, 165.0, 42.0), + 169: (51.0, 128.0, 133.0), + 170: (44.0, 21.0, 163.0), + 177: (232.0, 93.0, 193.0), + 180: (176.0, 102.0, 54.0), + 185: (116.0, 217.0, 17.0), + 188: (54.0, 209.0, 150.0), + 191: (60.0, 99.0, 204.0), + 193: (129.0, 43.0, 144.0), + 195: (252.0, 100.0, 106.0), + 202: (187.0, 196.0, 73.0), + 208: (13.0, 158.0, 40.0), + 213: (52.0, 122.0, 152.0), + 214: (128.0, 76.0, 202.0), + 221: (187.0, 50.0, 115.0), + 229: (180.0, 141.0, 71.0), + 230: (77.0, 208.0, 35.0), + 232: (72.0, 183.0, 168.0), + 233: (97.0, 99.0, 203.0), + 242: (172.0, 22.0, 158.0), + 250: (155.0, 64.0, 40.0), + 261: (118.0, 159.0, 30.0), + 264: (69.0, 252.0, 148.0), + 276: (45.0, 103.0, 173.0), + 283: (111.0, 38.0, 149.0), + 286: (184.0, 9.0, 49.0), + 300: (188.0, 174.0, 67.0), + 304: (53.0, 206.0, 53.0), + 312: (97.0, 235.0, 252.0), + 323: (66.0, 32.0, 182.0), + 325: (236.0, 114.0, 195.0), + 331: (241.0, 154.0, 83.0), + 342: (133.0, 240.0, 52.0), + 356: (16.0, 205.0, 144.0), + 370: (75.0, 101.0, 198.0), + 392: (237.0, 95.0, 251.0), + 395: (191.0, 52.0, 49.0), + 399: (227.0, 254.0, 54.0), + 408: (49.0, 206.0, 87.0), + 417: (48.0, 113.0, 150.0), + 488: (125.0, 73.0, 182.0), + 540: (229.0, 32.0, 114.0), + 562: (158.0, 119.0, 28.0), + 570: (60.0, 205.0, 27.0), + 572: (18.0, 215.0, 201.0), + 581: (79.0, 76.0, 153.0), + 609: (134.0, 13.0, 116.0), + 748: (192.0, 97.0, 63.0), + 776: (108.0, 163.0, 18.0), + 1156: (95.0, 220.0, 156.0), + 1163: (98.0, 141.0, 208.0), + 1164: (144.0, 19.0, 193.0), + 1165: (166.0, 36.0, 57.0), + 1166: (212.0, 202.0, 34.0), + 1167: (23.0, 206.0, 34.0), + 1168: (91.0, 211.0, 236.0), + 1169: (79.0, 55.0, 137.0), + 1170: (182.0, 19.0, 117.0), + 1171: (134.0, 76.0, 14.0), + 1172: (87.0, 185.0, 28.0), + 1173: (82.0, 224.0, 187.0), + 1174: (92.0, 110.0, 214.0), + 1175: (168.0, 80.0, 171.0), + 1176: (197.0, 63.0, 51.0), + 1178: (175.0, 199.0, 77.0), + 1179: (62.0, 180.0, 98.0), + 1180: (8.0, 91.0, 150.0), + 1181: (77.0, 15.0, 130.0), + 1182: (154.0, 65.0, 96.0), + 1183: (197.0, 152.0, 11.0), + 1184: (59.0, 155.0, 45.0), + 1185: (12.0, 147.0, 145.0), + 1186: (54.0, 35.0, 219.0), + 1187: (210.0, 73.0, 181.0), + 1188: (221.0, 124.0, 77.0), + 1189: (149.0, 214.0, 66.0), + 1190: (72.0, 185.0, 134.0), + 1191: (42.0, 94.0, 198.0), +} + +### For instance segmentation the non-object categories ### +VALID_PANOPTIC_IDS = (1, 3) + +CLASS_LABELS_PANOPTIC = ("wall", "floor") diff --git a/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_splits.py b/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_splits.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5585f70319d1eb061669bd82bbf3d64d0bca7b --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/scannet200/scannet200_splits.py @@ -0,0 +1,625 @@ +### This file contains the HEAD - COMMON - TAIL split category ids for ScanNet 200 + +HEAD_CATS_SCANNET_200 = [ + "tv stand", + "curtain", + "blinds", + "shower curtain", + 
"bookshelf", + "tv", + "kitchen cabinet", + "pillow", + "lamp", + "dresser", + "monitor", + "object", + "ceiling", + "board", + "stove", + "closet wall", + "couch", + "office chair", + "kitchen counter", + "shower", + "closet", + "doorframe", + "sofa chair", + "mailbox", + "nightstand", + "washing machine", + "picture", + "book", + "sink", + "recycling bin", + "table", + "backpack", + "shower wall", + "toilet", + "copier", + "counter", + "stool", + "refrigerator", + "window", + "file cabinet", + "chair", + "wall", + "plant", + "coffee table", + "stairs", + "armchair", + "cabinet", + "bathroom vanity", + "bathroom stall", + "mirror", + "blackboard", + "trash can", + "stair rail", + "box", + "towel", + "door", + "clothes", + "whiteboard", + "bed", + "floor", + "bathtub", + "desk", + "wardrobe", + "clothes dryer", + "radiator", + "shelf", +] +COMMON_CATS_SCANNET_200 = [ + "cushion", + "end table", + "dining table", + "keyboard", + "bag", + "toilet paper", + "printer", + "blanket", + "microwave", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "basket", + "fan", + "laptop", + "person", + "paper towel dispenser", + "oven", + "rack", + "piano", + "suitcase", + "rail", + "container", + "telephone", + "stand", + "light", + "laundry basket", + "pipe", + "seat", + "column", + "bicycle", + "ladder", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "machine", + "mat", + "windowsill", + "bulletin board", + "fireplace", + "mini fridge", + "water cooler", + "shower door", + "pillar", + "ledge", + "furniture", + "cart", + "decoration", + "closet door", + "vacuum cleaner", + "dish rack", + "range hood", + "projector screen", + "divider", + "bathroom counter", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "bathroom cabinet", + "structure", + "storage organizer", + "potted plant", + "mattress", +] +TAIL_CATS_SCANNET_200 = [ + "paper", + "plate", + "soap dispenser", + "bucket", + "clock", + "guitar", + "toilet paper holder", + "speaker", + "cup", + "paper towel roll", + "bar", + "toaster", + "ironing board", + "soap dish", + "toilet paper dispenser", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "paper cutter", + "tray", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "sign", + "projector", + "candle", + "plunger", + "stuffed animal", + "headphones", + "broom", + "guitar case", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", + "shower floor", + "water pitcher", + "bowl", + "paper bag", + "alarm clock", + "music stand", + "laundry detergent", + "dumbbell", + "tube", + "cd case", + "closet rod", + "coffee kettle", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "luggage", +] + + +### Given the different size of the official train and val sets, not all ScanNet200 categories are present in the validation set. 
+### Here we list of categories with labels and IDs present in both train and validation set, and the remaining categories those are present in train, but not in val +### We dont evaluate on unseen validation categories in this benchmark + +VALID_CLASS_IDS_200_VALIDATION = ( + "wall", + "chair", + "floor", + "table", + "door", + "couch", + "cabinet", + "shelf", + "desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "vent", + "shower floor", + "water pitcher", + "mailbox", + "bowl", + "paper bag", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "closet rod", + "coffee kettle", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "mattress", +) + +CLASS_LABELS_200_VALIDATION = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, + 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 
90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, + 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 300, + 304, + 312, + 323, + 325, + 342, + 356, + 370, + 392, + 395, + 408, + 417, + 488, + 540, + 562, + 570, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1175, + 1176, + 1179, + 1180, + 1181, + 1182, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1191, +) + +VALID_CLASS_IDS_200_TRAIN_ONLY = ( + "bicycle", + "storage container", + "candle", + "guitar case", + "purse", + "alarm clock", + "music stand", + "cd case", + "structure", + "storage organizer", + "luggage", +) + +CLASS_LABELS_200_TRAIN_ONLY = ( + 121, + 221, + 286, + 331, + 399, + 572, + 581, + 1174, + 1178, + 1183, + 1190, +) diff --git a/models/Mask3D/build/lib/mask3d/datasets/semseg.py b/models/Mask3D/build/lib/mask3d/datasets/semseg.py new file mode 100644 index 0000000000000000000000000000000000000000..a848b1a20e4690971bf16790fcea00ade84441c0 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/semseg.py @@ -0,0 +1,993 @@ +import logging +from itertools import product +from pathlib import Path +from random import random, sample, uniform +from typing import List, Optional, Tuple, Union +from random import choice +from copy import deepcopy +from random import randrange + + +import numpy +import torch +from datasets.random_cuboid import RandomCuboid + +import albumentations as A +import numpy as np +import scipy +import volumentations as V +import yaml + +# from yaml import CLoader as Loader +from torch.utils.data import Dataset +from datasets.scannet200.scannet200_constants import ( + SCANNET_COLOR_MAP_200, + SCANNET_COLOR_MAP_20, +) + +logger = logging.getLogger(__name__) + + +class SemanticSegmentationDataset(Dataset): + """Docstring for SemanticSegmentationDataset.""" + + def __init__( + self, + dataset_name="scannet", + data_dir: Optional[Union[str, Tuple[str]]] = "data/processed/scannet", + label_db_filepath: Optional[ + str + ] = "configs/scannet_preprocessing/label_database.yaml", + # mean std values from scannet + color_mean_std: Optional[Union[str, Tuple[Tuple[float]]]] = ( + (0.47793125906962, 0.4303257521323044, 0.3749598901421883), + (0.2834475483823543, 0.27566157565723015, 0.27018971370874995), + ), + mode: Optional[str] = "train", + add_colors: Optional[bool] = True, + add_normals: Optional[bool] = True, + add_raw_coordinates: Optional[bool] = False, + add_instance: Optional[bool] = False, + num_labels: Optional[int] = -1, + data_percent: Optional[float] = 1.0, + ignore_label: Optional[Union[int, Tuple[int]]] = 255, + volume_augmentations_path: Optional[str] = None, + image_augmentations_path: Optional[str] = None, + instance_oversampling=0, + place_around_existing=False, + max_cut_region=0, + point_per_cut=100, + flip_in_center=False, + noise_rate=0.0, + resample_points=0.0, + cache_data=False, + add_unlabeled_pc=False, + task="instance_segmentation", + cropping=False, + cropping_args=None, + is_tta=False, + crop_min_size=20000, + crop_length=6.0, + cropping_v1=True, + reps_per_epoch=1, + area=-1, + on_crops=False, + eval_inner_core=-1, + filter_out_classes=[], + 
label_offset=0, + add_clip=False, + is_elastic_distortion=True, + color_drop=0.0, + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "unknown task" + + self.add_clip = add_clip + self.dataset_name = dataset_name + self.is_elastic_distortion = is_elastic_distortion + self.color_drop = color_drop + + if self.dataset_name == "scannet": + self.color_map = SCANNET_COLOR_MAP_20 + self.color_map[255] = (255, 255, 255) + elif self.dataset_name == "stpls3d": + self.color_map = { + 0: [0, 255, 0], # Ground + 1: [0, 0, 255], # Build + 2: [0, 255, 255], # LowVeg + 3: [255, 255, 0], # MediumVeg + 4: [255, 0, 255], # HiVeg + 5: [100, 100, 255], # Vehicle + 6: [200, 200, 100], # Truck + 7: [170, 120, 200], # Aircraft + 8: [255, 0, 0], # MilitaryVec + 9: [200, 100, 100], # Bike + 10: [10, 200, 100], # Motorcycle + 11: [200, 200, 200], # LightPole + 12: [50, 50, 50], # StreetSign + 13: [60, 130, 60], # Clutter + 14: [130, 30, 60], + } # Fence + elif self.dataset_name == "scannet200": + self.color_map = SCANNET_COLOR_MAP_200 + elif self.dataset_name == "s3dis": + self.color_map = { + 0: [0, 255, 0], # ceiling + 1: [0, 0, 255], # floor + 2: [0, 255, 255], # wall + 3: [255, 255, 0], # beam + 4: [255, 0, 255], # column + 5: [100, 100, 255], # window + 6: [200, 200, 100], # door + 7: [170, 120, 200], # table + 8: [255, 0, 0], # chair + 9: [200, 100, 100], # sofa + 10: [10, 200, 100], # bookcase + 11: [200, 200, 200], # board + 12: [50, 50, 50], # clutter + } + else: + assert False, "dataset not known" + + self.task = task + + self.filter_out_classes = filter_out_classes + self.label_offset = label_offset + + self.area = area + self.eval_inner_core = eval_inner_core + + self.reps_per_epoch = reps_per_epoch + + self.cropping = cropping + self.cropping_args = cropping_args + self.is_tta = is_tta + self.on_crops = on_crops + + self.crop_min_size = crop_min_size + self.crop_length = crop_length + + self.version1 = cropping_v1 + + self.random_cuboid = RandomCuboid( + self.crop_min_size, + crop_length=self.crop_length, + version1=self.version1, + ) + + self.mode = mode + self.data_dir = data_dir + self.add_unlabeled_pc = add_unlabeled_pc + if add_unlabeled_pc: + self.other_database = self._load_yaml( + Path(data_dir).parent / "matterport" / "train_database.yaml" + ) + if type(data_dir) == str: + self.data_dir = [self.data_dir] + self.ignore_label = ignore_label + self.add_colors = add_colors + self.add_normals = add_normals + self.add_instance = add_instance + self.add_raw_coordinates = add_raw_coordinates + self.instance_oversampling = instance_oversampling + self.place_around_existing = place_around_existing + self.max_cut_region = max_cut_region + self.point_per_cut = point_per_cut + self.flip_in_center = flip_in_center + self.noise_rate = noise_rate + self.resample_points = resample_points + + # loading database files + self._data = [] + for database_path in self.data_dir: + database_path = Path(database_path) + mode = 'Validation' + if self.dataset_name != "s3dis": + if not (database_path / f"{mode}_database.yaml").exists(): + print( + f"generate {database_path}/{mode}_database.yaml first" + ) + exit() + self._data.extend( + self._load_yaml(database_path / f"{mode}_database.yaml") + ) + else: + # mode_s3dis = f"Area_{self.area}" + mode_s3dis = "Validation" + if self.mode == "train": + mode_s3dis = "train_" + mode_s3dis + if not ( + database_path / f"{mode_s3dis}_database.yaml" + ).exists(): + print( + f"generate {database_path}/{mode_s3dis}_database.yaml first" + ) + exit() + 
self._data.extend( + self._load_yaml( + database_path / f"{mode_s3dis}_database.yaml" + ) + ) + if data_percent < 1.0: + self._data = sample( + self._data, int(len(self._data) * data_percent) + ) + # labels = self._load_yaml(Path(label_db_filepath)) + + # if working only on classes for validation - discard others + # self._labels = self._select_correct_labels(labels, num_labels) + + if instance_oversampling > 0: + self.instance_data = self._load_yaml( + Path(label_db_filepath).parent / "instance_database.yaml" + ) + + # normalize color channels + if self.dataset_name == "s3dis": + color_mean_std = color_mean_std.replace( + "color_mean_std.yaml", f"Area_{self.area}_color_mean_std.yaml" + ) + + if Path(str(color_mean_std)).exists(): + color_mean_std = self._load_yaml(color_mean_std) + color_mean, color_std = ( + tuple(color_mean_std["mean"]), + tuple(color_mean_std["std"]), + ) + elif len(color_mean_std[0]) == 3 and len(color_mean_std[1]) == 3: + color_mean, color_std = color_mean_std[0], color_mean_std[1] + else: + logger.error( + "pass mean and std as tuple of tuples, or as an .yaml file" + ) + + # augmentations + self.volume_augmentations = V.NoOp() + if (volume_augmentations_path is not None) and ( + volume_augmentations_path != "none" + ): + self.volume_augmentations = V.load( + Path(volume_augmentations_path), data_format="yaml" + ) + self.image_augmentations = A.NoOp() + if (image_augmentations_path is not None) and ( + image_augmentations_path != "none" + ): + self.image_augmentations = A.load( + Path(image_augmentations_path), data_format="yaml" + ) + # mandatory color augmentation + if add_colors: + self.normalize_color = A.Normalize(mean=color_mean, std=color_std) + + self.cache_data = cache_data + # new_data = [] + if self.cache_data: + new_data = [] + for i in range(len(self._data)): + self._data[i]["data"] = np.load( + self.data[i]["filepath"].replace("../../", "") + ) + if self.on_crops: + if self.eval_inner_core == -1: + for block_id, block in enumerate( + self.splitPointCloud(self._data[i]["data"]) + ): + if len(block) > 10000: + new_data.append( + { + "instance_gt_filepath": self._data[i][ + "instance_gt_filepath" + ][block_id] + if len( + self._data[i][ + "instance_gt_filepath" + ] + ) + > 0 + else list(), + "scene": f"{self._data[i]['scene'].replace('.txt', '')}_{block_id}.txt", + "raw_filepath": f"{self.data[i]['filepath'].replace('.npy', '')}_{block_id}", + "data": block, + } + ) + else: + assert False + else: + conds_inner, blocks_outer = self.splitPointCloud( + self._data[i]["data"], + size=self.crop_length, + inner_core=self.eval_inner_core, + ) + + for block_id in range(len(conds_inner)): + cond_inner = conds_inner[block_id] + block_outer = blocks_outer[block_id] + + if cond_inner.sum() > 10000: + new_data.append( + { + "instance_gt_filepath": self._data[i][ + "instance_gt_filepath" + ][block_id] + if len( + self._data[i][ + "instance_gt_filepath" + ] + ) + > 0 + else list(), + "scene": f"{self._data[i]['scene'].replace('.txt', '')}_{block_id}.txt", + "raw_filepath": f"{self.data[i]['filepath'].replace('.npy', '')}_{block_id}", + "data": block_outer, + "cond_inner": cond_inner, + } + ) + else: + assert False + + if self.on_crops: + self._data = new_data + # new_data.append(np.load(self.data[i]["filepath"].replace("../../", ""))) + # self._data = new_data + + def splitPointCloud(self, cloud, size=50.0, stride=50, inner_core=-1): + if inner_core == -1: + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - size) / stride)) + 1 + depth = 
int(np.ceil((limitMax[1] - size) / stride)) + 1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks = [] + for (x, y) in cells: + xcond = (cloud[:, 0] <= x + size) & (cloud[:, 0] >= x) + ycond = (cloud[:, 1] <= y + size) & (cloud[:, 1] >= y) + cond = xcond & ycond + block = cloud[cond, :] + blocks.append(block) + return blocks + else: + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - inner_core) / stride)) + 1 + depth = int(np.ceil((limitMax[1] - inner_core) / stride)) + 1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks_outer = [] + conds_inner = [] + for (x, y) in cells: + xcond_outer = ( + cloud[:, 0] <= x + inner_core / 2.0 + size / 2 + ) & (cloud[:, 0] >= x + inner_core / 2.0 - size / 2) + ycond_outer = ( + cloud[:, 1] <= y + inner_core / 2.0 + size / 2 + ) & (cloud[:, 1] >= y + inner_core / 2.0 - size / 2) + + cond_outer = xcond_outer & ycond_outer + block_outer = cloud[cond_outer, :] + + xcond_inner = (block_outer[:, 0] <= x + inner_core) & ( + block_outer[:, 0] >= x + ) + ycond_inner = (block_outer[:, 1] <= y + inner_core) & ( + block_outer[:, 1] >= y + ) + + cond_inner = xcond_inner & ycond_inner + + conds_inner.append(cond_inner) + blocks_outer.append(block_outer) + return conds_inner, blocks_outer + + def map2color(self, labels): + output_colors = list() + + for label in labels: + output_colors.append(self.color_map[label]) + + return torch.tensor(output_colors) + + def __len__(self): + if self.is_tta: + return 5 * len(self.data) + else: + return self.reps_per_epoch * len(self.data) + + def __getitem__(self, idx: int): + idx = idx % len(self.data) + if self.is_tta: + idx = idx % len(self.data) + + if self.cache_data: + points = self.data[idx]["data"] + else: + assert not self.on_crops, "you need caching if on crops" + points = np.load(self.data[idx]["filepath"].replace("../../", "")) + + if "train" in self.mode and self.dataset_name in ["s3dis", "stpls3d"]: + inds = self.random_cuboid(points) + points = points[inds] + + coordinates, color, normals, segments, labels = ( + points[:, :3], + points[:, 3:6], + points[:, 6:9], + points[:, 9], + points[:, 10:12], + ) + + raw_coordinates = coordinates.copy() + raw_color = color + raw_normals = normals + + if not self.add_colors: + color = np.ones((len(color), 3)) + + # volume and image augmentations for train + if "train" in self.mode or self.is_tta: + if self.cropping: + new_idx = self.random_cuboid( + coordinates, + labels[:, 1], + self._remap_from_zero(labels[:, 0].copy()), + ) + + coordinates = coordinates[new_idx] + color = color[new_idx] + labels = labels[new_idx] + segments = segments[new_idx] + raw_color = raw_color[new_idx] + raw_normals = raw_normals[new_idx] + normals = normals[new_idx] + points = points[new_idx] + + coordinates -= coordinates.mean(0) + + try: + coordinates += ( + np.random.uniform(coordinates.min(0), coordinates.max(0)) + / 2 + ) + except OverflowError as err: + print(coordinates) + print(coordinates.shape) + raise err + + if self.instance_oversampling > 0.0: + ( + coordinates, + color, + normals, + labels, + ) = self.augment_individual_instance( + coordinates, + color, + normals, + labels, + self.instance_oversampling, + ) + + if self.flip_in_center: + coordinates = flip_in_center(coordinates) + + for i in (0, 1): + if random() < 0.5: + coord_max = np.max(points[:, i]) + coordinates[:, i] = coord_max - coordinates[:, i] + + if random() < 0.95: + if self.is_elastic_distortion: + 
for granularity, magnitude in ((0.2, 0.4), (0.8, 1.6)): + coordinates = elastic_distortion( + coordinates, granularity, magnitude + ) + aug = self.volume_augmentations( + points=coordinates, + normals=normals, + features=color, + labels=labels, + ) + coordinates, color, normals, labels = ( + aug["points"], + aug["features"], + aug["normals"], + aug["labels"], + ) + pseudo_image = color.astype(np.uint8)[np.newaxis, :, :] + color = np.squeeze( + self.image_augmentations(image=pseudo_image)["image"] + ) + + if self.point_per_cut != 0: + number_of_cuts = int(len(coordinates) / self.point_per_cut) + for _ in range(number_of_cuts): + size_of_cut = np.random.uniform(0.05, self.max_cut_region) + # not wall, floor or empty + point = choice(coordinates) + x_min = point[0] - size_of_cut + x_max = x_min + size_of_cut + y_min = point[1] - size_of_cut + y_max = y_min + size_of_cut + z_min = point[2] - size_of_cut + z_max = z_min + size_of_cut + indexes = crop( + coordinates, x_min, y_min, z_min, x_max, y_max, z_max + ) + coordinates, normals, color, labels = ( + coordinates[~indexes], + normals[~indexes], + color[~indexes], + labels[~indexes], + ) + + # if self.noise_rate > 0: + # coordinates, color, normals, labels = random_points( + # coordinates, + # color, + # normals, + # labels, + # self.noise_rate, + # self.ignore_label, + # ) + + if (self.resample_points > 0) or (self.noise_rate > 0): + coordinates, color, normals, labels = random_around_points( + coordinates, + color, + normals, + labels, + self.resample_points, + self.noise_rate, + self.ignore_label, + ) + + if self.add_unlabeled_pc: + if random() < 0.8: + new_points = np.load( + self.other_database[ + np.random.randint(0, len(self.other_database) - 1) + ]["filepath"] + ) + ( + unlabeled_coords, + unlabeled_color, + unlabeled_normals, + unlabeled_labels, + ) = ( + new_points[:, :3], + new_points[:, 3:6], + new_points[:, 6:9], + new_points[:, 9:], + ) + unlabeled_coords -= unlabeled_coords.mean(0) + unlabeled_coords += ( + np.random.uniform( + unlabeled_coords.min(0), unlabeled_coords.max(0) + ) + / 2 + ) + + aug = self.volume_augmentations( + points=unlabeled_coords, + normals=unlabeled_normals, + features=unlabeled_color, + labels=unlabeled_labels, + ) + ( + unlabeled_coords, + unlabeled_color, + unlabeled_normals, + unlabeled_labels, + ) = ( + aug["points"], + aug["features"], + aug["normals"], + aug["labels"], + ) + pseudo_image = unlabeled_color.astype(np.uint8)[ + np.newaxis, :, : + ] + unlabeled_color = np.squeeze( + self.image_augmentations(image=pseudo_image)["image"] + ) + + coordinates = np.concatenate( + (coordinates, unlabeled_coords) + ) + color = np.concatenate((color, unlabeled_color)) + normals = np.concatenate((normals, unlabeled_normals)) + labels = np.concatenate( + ( + labels, + np.full_like(unlabeled_labels, self.ignore_label), + ) + ) + + if random() < self.color_drop: + color[:] = 255 + + # normalize color information + pseudo_image = color.astype(np.uint8)[np.newaxis, :, :] + color = np.squeeze(self.normalize_color(image=pseudo_image)["image"]) + + # prepare labels and map from 0 to 20(40) + labels = labels.astype(np.int32) + # if labels.size > 0: + # labels[:, 0] = self._remap_from_zero(labels[:, 0]) + # if not self.add_instance: + # # taking only first column, which is segmentation label, not instance + # labels = labels[:, 0].flatten()[..., None] + + labels = np.hstack((labels, segments[..., None].astype(np.int32))) + + features = color + if self.add_normals: + features = np.hstack((features, normals)) + if 
self.add_raw_coordinates: + if len(features.shape) == 1: + features = np.hstack((features[None, ...], coordinates)) + else: + features = np.hstack((features, coordinates)) + + # if self.task != "semantic_segmentation": + if self.data[idx]["raw_filepath"].split("/")[-2] in [ + "scene0636_00", + "scene0154_00", + ]: + return self.__getitem__(0) + + if self.dataset_name == "s3dis": + return ( + coordinates, + features, + labels, + self.data[idx]["area"] + "_" + self.data[idx]["scene"], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + if self.dataset_name == "stpls3d": + if labels.shape[1] != 1: # only segments --> test set! + if np.unique(labels[:, -2]).shape[0] < 2: + print("NO INSTANCES") + return self.__getitem__(0) + return ( + coordinates, + features, + labels, + self.data[idx]["scene"], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + else: + return ( + coordinates, + features, + labels, + self.data[idx]["raw_filepath"].split("/")[-2], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + + @property + def data(self): + """database file containing information about preproscessed dataset""" + return self._data + + @property + def label_info(self): + """database file containing information labels used by dataset""" + return self._labels + + @staticmethod + def _load_yaml(filepath): + with open(filepath) as f: + # file = yaml.load(f, Loader=Loader) + file = yaml.load(f) + return file + + def _select_correct_labels(self, labels, num_labels): + number_of_validation_labels = 0 + number_of_all_labels = 0 + for ( + k, + v, + ) in labels.items(): + number_of_all_labels += 1 + if v["validation"]: + number_of_validation_labels += 1 + + if num_labels == number_of_all_labels: + return labels + elif num_labels == number_of_validation_labels: + valid_labels = dict() + for ( + k, + v, + ) in labels.items(): + if v["validation"]: + valid_labels.update({k: v}) + return valid_labels + else: + msg = f"""not available number labels, select from: + {number_of_validation_labels}, {number_of_all_labels}""" + raise ValueError(msg) + + def _remap_from_zero(self, labels): + labels[ + ~np.isin(labels, list(self.label_info.keys())) + ] = self.ignore_label + # remap to the range from 0 + for i, k in enumerate(self.label_info.keys()): + labels[labels == k] = i + return labels + + def _remap_model_output(self, output): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(self.label_info.keys()): + output_remapped[output == i] = k + return output_remapped + + def augment_individual_instance( + self, coordinates, color, normals, labels, oversampling=1.0 + ): + max_instance = int(len(np.unique(labels[:, 1]))) + # randomly selecting half of non-zero instances + for instance in range(0, int(max_instance * oversampling)): + if self.place_around_existing: + center = choice( + coordinates[ + labels[:, 1] == choice(np.unique(labels[:, 1])) + ] + ) + else: + center = np.array( + [uniform(-5, 5), uniform(-5, 5), uniform(-0.5, 2)] + ) + instance = choice(choice(self.instance_data)) + instance = np.load(instance["instance_filepath"]) + # centering two objects + instance[:, :3] = ( + instance[:, :3] - instance[:, :3].mean(axis=0) + center + ) + max_instance = max_instance + 1 + instance[:, -1] = max_instance + aug = V.Compose( + [ + V.Scale3d(), + V.RotateAroundAxis3d( + rotation_limit=np.pi / 24, axis=(1, 0, 0) + ), + V.RotateAroundAxis3d( + rotation_limit=np.pi / 24, axis=(0, 1, 0) + ), + V.RotateAroundAxis3d(rotation_limit=np.pi, axis=(0, 0, 1)), + ] + )( + 
points=instance[:, :3], + features=instance[:, 3:6], + normals=instance[:, 6:9], + labels=instance[:, 9:], + ) + coordinates = np.concatenate((coordinates, aug["points"])) + color = np.concatenate((color, aug["features"])) + normals = np.concatenate((normals, aug["normals"])) + labels = np.concatenate((labels, aug["labels"])) + + return coordinates, color, normals, labels + + +def elastic_distortion(pointcloud, granularity, magnitude): + """Apply elastic distortion on sparse coordinate space. + + pointcloud: numpy array of (number of points, at least 3 spatial dims) + granularity: size of the noise grid (in same scale[m/cm] as the voxel grid) + magnitude: noise multiplier + """ + blurx = np.ones((3, 1, 1, 1)).astype("float32") / 3 + blury = np.ones((1, 3, 1, 1)).astype("float32") / 3 + blurz = np.ones((1, 1, 3, 1)).astype("float32") / 3 + coords = pointcloud[:, :3] + coords_min = coords.min(0) + + # Create Gaussian noise tensor of the size given by granularity. + noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3 + noise = np.random.randn(*noise_dim, 3).astype(np.float32) + + # Smoothing. + for _ in range(2): + noise = scipy.ndimage.filters.convolve( + noise, blurx, mode="constant", cval=0 + ) + noise = scipy.ndimage.filters.convolve( + noise, blury, mode="constant", cval=0 + ) + noise = scipy.ndimage.filters.convolve( + noise, blurz, mode="constant", cval=0 + ) + + # Trilinear interpolate noise filters for each spatial dimensions. + ax = [ + np.linspace(d_min, d_max, d) + for d_min, d_max, d in zip( + coords_min - granularity, + coords_min + granularity * (noise_dim - 2), + noise_dim, + ) + ] + interp = scipy.interpolate.RegularGridInterpolator( + ax, noise, bounds_error=0, fill_value=0 + ) + pointcloud[:, :3] = coords + interp(coords) * magnitude + return pointcloud + + +def crop(points, x_min, y_min, z_min, x_max, y_max, z_max): + if x_max <= x_min or y_max <= y_min or z_max <= z_min: + raise ValueError( + "We should have x_min < x_max and y_min < y_max and z_min < z_max. 
But we got" + " (x_min = {x_min}, y_min = {y_min}, z_min = {z_min}," + " x_max = {x_max}, y_max = {y_max}, z_max = {z_max})".format( + x_min=x_min, + x_max=x_max, + y_min=y_min, + y_max=y_max, + z_min=z_min, + z_max=z_max, + ) + ) + inds = np.all( + [ + (points[:, 0] >= x_min), + (points[:, 0] < x_max), + (points[:, 1] >= y_min), + (points[:, 1] < y_max), + (points[:, 2] >= z_min), + (points[:, 2] < z_max), + ], + axis=0, + ) + return inds + + +def flip_in_center(coordinates): + # moving coordinates to center + coordinates -= coordinates.mean(0) + aug = V.Compose( + [ + V.Flip3d(axis=(0, 1, 0), always_apply=True), + V.Flip3d(axis=(1, 0, 0), always_apply=True), + ] + ) + + first_crop = coordinates[:, 0] > 0 + first_crop &= coordinates[:, 1] > 0 + # x -y + second_crop = coordinates[:, 0] > 0 + second_crop &= coordinates[:, 1] < 0 + # -x y + third_crop = coordinates[:, 0] < 0 + third_crop &= coordinates[:, 1] > 0 + # -x -y + fourth_crop = coordinates[:, 0] < 0 + fourth_crop &= coordinates[:, 1] < 0 + + if first_crop.size > 1: + coordinates[first_crop] = aug(points=coordinates[first_crop])["points"] + if second_crop.size > 1: + minimum = coordinates[second_crop].min(0) + minimum[2] = 0 + minimum[0] = 0 + coordinates[second_crop] = aug(points=coordinates[second_crop])[ + "points" + ] + coordinates[second_crop] += minimum + if third_crop.size > 1: + minimum = coordinates[third_crop].min(0) + minimum[2] = 0 + minimum[1] = 0 + coordinates[third_crop] = aug(points=coordinates[third_crop])["points"] + coordinates[third_crop] += minimum + if fourth_crop.size > 1: + minimum = coordinates[fourth_crop].min(0) + minimum[2] = 0 + coordinates[fourth_crop] = aug(points=coordinates[fourth_crop])[ + "points" + ] + coordinates[fourth_crop] += minimum + + return coordinates + + +def random_around_points( + coordinates, + color, + normals, + labels, + rate=0.2, + noise_rate=0, + ignore_label=255, +): + coord_indexes = sample( + list(range(len(coordinates))), k=int(len(coordinates) * rate) + ) + noisy_coordinates = deepcopy(coordinates[coord_indexes]) + noisy_coordinates += np.random.uniform( + -0.2 - noise_rate, 0.2 + noise_rate, size=noisy_coordinates.shape + ) + + if noise_rate > 0: + noisy_color = np.random.randint(0, 255, size=noisy_coordinates.shape) + noisy_normals = np.random.rand(*noisy_coordinates.shape) * 2 - 1 + noisy_labels = np.full(labels[coord_indexes].shape, ignore_label) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + else: + noisy_color = deepcopy(color[coord_indexes]) + noisy_normals = deepcopy(normals[coord_indexes]) + noisy_labels = deepcopy(labels[coord_indexes]) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + + return coordinates, color, normals, labels + + +def random_points( + coordinates, color, normals, labels, noise_rate=0.6, ignore_label=255 +): + max_boundary = coordinates.max(0) + 0.1 + min_boundary = coordinates.min(0) - 0.1 + + noisy_coordinates = int( + (max(max_boundary) - min(min_boundary)) / noise_rate + ) + + noisy_coordinates = np.array( + list( + product( + np.linspace( + min_boundary[0], max_boundary[0], noisy_coordinates + ), + np.linspace( + min_boundary[1], max_boundary[1], noisy_coordinates + ), + np.linspace( + min_boundary[2], max_boundary[2], noisy_coordinates + ), + ) 
+ ) + ) + noisy_coordinates += np.random.uniform( + -noise_rate, noise_rate, size=noisy_coordinates.shape + ) + + noisy_color = np.random.randint(0, 255, size=noisy_coordinates.shape) + noisy_normals = np.random.rand(*noisy_coordinates.shape) * 2 - 1 + noisy_labels = np.full( + (noisy_coordinates.shape[0], labels.shape[1]), ignore_label + ) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + return coordinates, color, normals, labels diff --git a/models/Mask3D/build/lib/mask3d/datasets/utils.py b/models/Mask3D/build/lib/mask3d/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..46d8dd7e112f9722e2af65a76f24191600764a00 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/datasets/utils.py @@ -0,0 +1,639 @@ +import MinkowskiEngine as ME +import numpy as np +import torch +from random import random + + +class VoxelizeCollate: + def __init__( + self, + ignore_label=255, + voxel_size=1, + mode="test", + small_crops=False, + very_small_crops=False, + batch_instance=False, + probing=False, + task="instance_segmentation", + ignore_class_threshold=100, + filter_out_classes=[], + label_offset=0, + num_queries=None, + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "task not known" + self.task = task + self.filter_out_classes = filter_out_classes + self.label_offset = label_offset + self.voxel_size = voxel_size + self.ignore_label = ignore_label + self.mode = mode + self.batch_instance = batch_instance + self.small_crops = small_crops + self.very_small_crops = very_small_crops + self.probing = probing + self.ignore_class_threshold = ignore_class_threshold + + self.num_queries = num_queries + + def __call__(self, batch): + if ("train" in self.mode) and ( + self.small_crops or self.very_small_crops + ): + batch = make_crops(batch) + if ("train" in self.mode) and self.very_small_crops: + batch = make_crops(batch) + return voxelize( + batch, + self.ignore_label, + self.voxel_size, + self.probing, + self.mode, + task=self.task, + ignore_class_threshold=self.ignore_class_threshold, + filter_out_classes=self.filter_out_classes, + label_offset=self.label_offset, + num_queries=self.num_queries, + ) + + +class VoxelizeCollateMerge: + def __init__( + self, + ignore_label=255, + voxel_size=1, + mode="test", + scenes=2, + small_crops=False, + very_small_crops=False, + batch_instance=False, + make_one_pc_noise=False, + place_nearby=False, + place_far=False, + proba=1, + probing=False, + task="instance_segmentation", + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "task not known" + self.task = task + self.mode = mode + self.scenes = scenes + self.small_crops = small_crops + self.very_small_crops = very_small_crops + self.ignore_label = ignore_label + self.voxel_size = voxel_size + self.batch_instance = batch_instance + self.make_one_pc_noise = make_one_pc_noise + self.place_nearby = place_nearby + self.place_far = place_far + self.proba = proba + self.probing = probing + + def __call__(self, batch): + if ( + ("train" in self.mode) + and (not self.make_one_pc_noise) + and (self.proba > random()) + ): + if self.small_crops or self.very_small_crops: + batch = make_crops(batch) + if self.very_small_crops: + batch = make_crops(batch) + if self.batch_instance: + batch = batch_instances(batch) + new_batch = [] + for i in range(0, len(batch), self.scenes): + batch_coordinates = 
[] + batch_features = [] + batch_labels = [] + + batch_filenames = "" + batch_raw_color = [] + batch_raw_normals = [] + + offset_instance_id = 0 + offset_segment_id = 0 + + for j in range(min(len(batch[i:]), self.scenes)): + batch_coordinates.append(batch[i + j][0]) + batch_features.append(batch[i + j][1]) + + if j == 0: + batch_filenames = batch[i + j][3] + else: + batch_filenames = ( + batch_filenames + f"+{batch[i + j][3]}" + ) + + batch_raw_color.append(batch[i + j][4]) + batch_raw_normals.append(batch[i + j][5]) + + # make instance ids and segment ids unique + # take care that -1 instances stay at -1 + batch_labels.append( + batch[i + j][2] + + [0, offset_instance_id, offset_segment_id] + ) + batch_labels[-1][batch[i + j][2][:, 1] == -1, 1] = -1 + + max_instance_id, max_segment_id = batch[i + j][2].max( + axis=0 + )[1:] + offset_segment_id = offset_segment_id + max_segment_id + 1 + offset_instance_id = ( + offset_instance_id + max_instance_id + 1 + ) + + if (len(batch_coordinates) == 2) and self.place_nearby: + border = batch_coordinates[0][:, 0].max() + border -= batch_coordinates[1][:, 0].min() + batch_coordinates[1][:, 0] += border + elif (len(batch_coordinates) == 2) and self.place_far: + batch_coordinates[1] += ( + np.random.uniform((-10, -10, -10), (10, 10, 10)) * 200 + ) + new_batch.append( + ( + np.vstack(batch_coordinates), + np.vstack(batch_features), + np.concatenate(batch_labels), + batch_filenames, + np.vstack(batch_raw_color), + np.vstack(batch_raw_normals), + ) + ) + # TODO WHAT ABOUT POINT2SEGMENT AND SO ON ... + batch = new_batch + elif ("train" in self.mode) and self.make_one_pc_noise: + new_batch = [] + for i in range(0, len(batch), 2): + if (i + 1) < len(batch): + new_batch.append( + [ + np.vstack((batch[i][0], batch[i + 1][0])), + np.vstack((batch[i][1], batch[i + 1][1])), + np.concatenate( + ( + batch[i][2], + np.full_like( + batch[i + 1][2], self.ignore_label + ), + ) + ), + ] + ) + new_batch.append( + [ + np.vstack((batch[i][0], batch[i + 1][0])), + np.vstack((batch[i][1], batch[i + 1][1])), + np.concatenate( + ( + np.full_like( + batch[i][2], self.ignore_label + ), + batch[i + 1][2], + ) + ), + ] + ) + else: + new_batch.append([batch[i][0], batch[i][1], batch[i][2]]) + batch = new_batch + # return voxelize(batch, self.ignore_label, self.voxel_size, self.probing, self.mode) + return voxelize( + batch, + self.ignore_label, + self.voxel_size, + self.probing, + self.mode, + task=self.task, + ) + + +def batch_instances(batch): + new_batch = [] + for sample in batch: + for instance_id in np.unique(sample[2][:, 1]): + new_batch.append( + ( + sample[0][sample[2][:, 1] == instance_id], + sample[1][sample[2][:, 1] == instance_id], + sample[2][sample[2][:, 1] == instance_id][:, 0], + ), + ) + return new_batch + + +def voxelize( + batch, + ignore_label, + voxel_size, + probing, + mode, + task, + ignore_class_threshold, + filter_out_classes, + label_offset, + num_queries, +): + ( + coordinates, + features, + labels, + original_labels, + inverse_maps, + original_colors, + original_normals, + original_coordinates, + idx, + ) = ([], [], [], [], [], [], [], [], []) + voxelization_dict = { + "ignore_label": ignore_label, + # "quantization_size": self.voxel_size, + "return_index": True, + "return_inverse": True, + } + + full_res_coords = [] + + for sample in batch: + idx.append(sample[7]) + original_coordinates.append(sample[6]) + original_labels.append(sample[2]) + full_res_coords.append(sample[0]) + original_colors.append(sample[4]) + original_normals.append(sample[5]) + + 
coords = np.floor(sample[0] / voxel_size) + voxelization_dict.update( + { + "coordinates": torch.from_numpy(coords).to("cpu").contiguous(), + "features": sample[1], + } + ) + + # maybe this change (_, _, ...) is not necessary and we can directly get out + # the sample coordinates? + _, _, unique_map, inverse_map = ME.utils.sparse_quantize( + **voxelization_dict + ) + inverse_maps.append(inverse_map) + + sample_coordinates = coords[unique_map] + coordinates.append(torch.from_numpy(sample_coordinates).int()) + sample_features = sample[1][unique_map] + features.append(torch.from_numpy(sample_features).float()) + if len(sample[2]) > 0: + sample_labels = sample[2][unique_map] + labels.append(torch.from_numpy(sample_labels).long()) + + # Concatenate all lists + input_dict = {"coords": coordinates, "feats": features} + if len(labels) > 0: + input_dict["labels"] = labels + coordinates, features, labels = ME.utils.sparse_collate(**input_dict) + else: + coordinates, features = ME.utils.sparse_collate(**input_dict) + labels = torch.Tensor([]) + + if probing: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + ), + labels, + ) + + if mode == "test": + for i in range(len(input_dict["labels"])): + _, ret_index, ret_inv = np.unique( + input_dict["labels"][i][:, 0], + return_index=True, + return_inverse=True, + ) + input_dict["labels"][i][:, 0] = torch.from_numpy(ret_inv) + # input_dict["segment2label"].append(input_dict["labels"][i][ret_index][:, :-1]) + else: + input_dict["segment2label"] = [] + + if "labels" in input_dict: + for i in range(len(input_dict["labels"])): + # TODO BIGGER CHANGE CHECK!!! + _, ret_index, ret_inv = np.unique( + input_dict["labels"][i][:, -1], + return_index=True, + return_inverse=True, + ) + input_dict["labels"][i][:, -1] = torch.from_numpy(ret_inv) + input_dict["segment2label"].append( + input_dict["labels"][i][ret_index][:, :-1] + ) + + if "labels" in input_dict: + list_labels = input_dict["labels"] + + target = [] + target_full = [] + + if len(list_labels[0].shape) == 1: + for batch_id in range(len(list_labels)): + label_ids = list_labels[batch_id].unique() + if 255 in label_ids: + label_ids = label_ids[:-1] + + target.append( + { + "labels": label_ids, + "masks": list_labels[batch_id] + == label_ids.unsqueeze(1), + } + ) + else: + if mode == "test": + for i in range(len(input_dict["labels"])): + target.append( + {"point2segment": input_dict["labels"][i][:, 0]} + ) + target_full.append( + { + "point2segment": torch.from_numpy( + original_labels[i][:, 0] + ).long() + } + ) + else: + target = get_instance_masks( + list_labels, + list_segments=input_dict["segment2label"], + task=task, + ignore_class_threshold=ignore_class_threshold, + filter_out_classes=filter_out_classes, + label_offset=label_offset, + ) + for i in range(len(target)): + target[i]["point2segment"] = input_dict["labels"][i][:, 2] + if "train" not in mode: + target_full = get_instance_masks( + [torch.from_numpy(l) for l in original_labels], + task=task, + ignore_class_threshold=ignore_class_threshold, + filter_out_classes=filter_out_classes, + label_offset=label_offset, + ) + for i in range(len(target_full)): + target_full[i]["point2segment"] = torch.from_numpy( + original_labels[i][:, 2] + ).long() + else: + target = [] + target_full = [] + coordinates = [] + features = [] + + if "train" not in mode: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + full_res_coords, + target_full, + original_colors, + original_normals, + original_coordinates, + 
idx, + ), + target, + [sample[3] for sample in batch], + ) + else: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + full_res_coords, + ), + target, + [sample[3] for sample in batch], + ) + + +def get_instance_masks( + list_labels, + task, + list_segments=None, + ignore_class_threshold=100, + filter_out_classes=[], + label_offset=0, +): + target = [] + + for batch_id in range(len(list_labels)): + label_ids = [] + masks = [] + segment_masks = [] + instance_ids = list_labels[batch_id][:, 1].unique() + + for instance_id in instance_ids: + if instance_id == -1: + continue + + # TODO is it possible that a ignore class (255) is an instance??? + # instance == -1 ??? + tmp = list_labels[batch_id][ + list_labels[batch_id][:, 1] == instance_id + ] + label_id = tmp[0, 0] + + if ( + label_id in filter_out_classes + ): # floor, wall, undefined==255 is not included + continue + + if ( + 255 in filter_out_classes + and label_id.item() == 255 + and tmp.shape[0] < ignore_class_threshold + ): + continue + + label_ids.append(label_id) + masks.append(list_labels[batch_id][:, 1] == instance_id) + + if list_segments: + segment_mask = torch.zeros( + list_segments[batch_id].shape[0] + ).bool() + segment_mask[ + list_labels[batch_id][ + list_labels[batch_id][:, 1] == instance_id + ][:, 2].unique() + ] = True + segment_masks.append(segment_mask) + + if len(label_ids) == 0: + return list() + + label_ids = torch.stack(label_ids) + masks = torch.stack(masks) + if list_segments: + segment_masks = torch.stack(segment_masks) + + if task == "semantic_segmentation": + new_label_ids = [] + new_masks = [] + new_segment_masks = [] + for label_id in label_ids.unique(): + masking = label_ids == label_id + + new_label_ids.append(label_id) + new_masks.append(masks[masking, :].sum(dim=0).bool()) + + if list_segments: + new_segment_masks.append( + segment_masks[masking, :].sum(dim=0).bool() + ) + + label_ids = torch.stack(new_label_ids) + masks = torch.stack(new_masks) + + if list_segments: + segment_masks = torch.stack(new_segment_masks) + + target.append( + { + "labels": label_ids, + "masks": masks, + "segment_mask": segment_masks, + } + ) + else: + target.append({"labels": label_ids, "masks": masks}) + else: + l = torch.clamp(label_ids - label_offset, min=0) + + if list_segments: + target.append( + { + "labels": l, + "masks": masks, + "segment_mask": segment_masks, + } + ) + else: + target.append({"labels": l, "masks": masks}) + return target + + +def make_crops(batch): + new_batch = [] + # detupling + for scene in batch: + new_batch.append([scene[0], scene[1], scene[2]]) + batch = new_batch + new_batch = [] + for scene in batch: + # move to center for better quadrant split + scene[0][:, :3] -= scene[0][:, :3].mean(0) + + # BUGFIX - there always would be a point in every quadrant + scene[0] = np.vstack( + ( + scene[0], + np.array( + [ + [0.1, 0.1, 0.1], + [0.1, -0.1, 0.1], + [-0.1, 0.1, 0.1], + [-0.1, -0.1, 0.1], + ] + ), + ) + ) + scene[1] = np.vstack((scene[1], np.zeros((4, scene[1].shape[1])))) + scene[2] = np.concatenate( + (scene[2], np.full_like((scene[2]), 255)[:4]) + ) + + crop = scene[0][:, 0] > 0 + crop &= scene[0][:, 1] > 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + crop = scene[0][:, 0] > 0 + crop &= scene[0][:, 1] < 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + crop = scene[0][:, 0] < 0 + crop &= scene[0][:, 1] > 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], 
scene[2][crop]]) + + crop = scene[0][:, 0] < 0 + crop &= scene[0][:, 1] < 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + # moving all of them to center + for i in range(len(new_batch)): + new_batch[i][0][:, :3] -= new_batch[i][0][:, :3].mean(0) + return new_batch + + +class NoGpu: + def __init__( + self, + coordinates, + features, + original_labels=None, + inverse_maps=None, + full_res_coords=None, + target_full=None, + original_colors=None, + original_normals=None, + original_coordinates=None, + idx=None, + ): + """helper class to prevent gpu loading on lightning""" + self.coordinates = coordinates + self.features = features + self.original_labels = original_labels + self.inverse_maps = inverse_maps + self.full_res_coords = full_res_coords + self.target_full = target_full + self.original_colors = original_colors + self.original_normals = original_normals + self.original_coordinates = original_coordinates + self.idx = idx + + +class NoGpuMask: + def __init__( + self, + coordinates, + features, + original_labels=None, + inverse_maps=None, + masks=None, + labels=None, + ): + """helper class to prevent gpu loading on lightning""" + self.coordinates = coordinates + self.features = features + self.original_labels = original_labels + self.inverse_maps = inverse_maps + + self.masks = masks + self.labels = labels diff --git a/models/Mask3D/build/lib/mask3d/main_instance_segmentation.py b/models/Mask3D/build/lib/mask3d/main_instance_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..c2664673cb3a1fa16191e7baa82a50bbb8f5f195 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/main_instance_segmentation.py @@ -0,0 +1,114 @@ +import logging +import os +from hashlib import md5 +from uuid import uuid4 +import hydra +from dotenv import load_dotenv +from omegaconf import DictConfig, OmegaConf +from trainer.trainer import InstanceSegmentation, RegularCheckpointing +from pytorch_lightning.callbacks import ModelCheckpoint +from utils.utils import ( + flatten_dict, + load_baseline_model, + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) +from pytorch_lightning import Trainer, seed_everything + + +def get_parameters(cfg: DictConfig): + logger = logging.getLogger(__name__) + load_dotenv(".env") + + # parsing input parameters + seed_everything(cfg.general.seed) + + # getting basic configuration + if cfg.general.get("gpus", None) is None: + cfg.general.gpus = os.environ.get("CUDA_VISIBLE_DEVICES", None) + loggers = [] + + # cfg.general.experiment_id = "0" # str(Repo("./").commit())[:8] + # params = flatten_dict(OmegaConf.to_container(cfg, resolve=True)) + + # create unique id for experiments that are run locally + # unique_id = "_" + str(uuid4())[:4] + # cfg.general.version = md5(str(params).encode("utf-8")).hexdigest()[:8] + unique_id + + if not os.path.exists(cfg.general.save_dir): + os.makedirs(cfg.general.save_dir) + else: + print("EXPERIMENT ALREADY EXIST") + cfg["trainer"][ + "resume_from_checkpoint" + ] = f"{cfg.general.save_dir}/last-epoch.ckpt" + + for log in cfg.logging: + print(log) + # loggers.append(hydra.utils.instantiate(log)) + # loggers[-1].log_hyperparams( + # flatten_dict(OmegaConf.to_container(cfg, resolve=True)) + # ) + + model = InstanceSegmentation(cfg) + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = 
load_checkpoint_with_missing_or_exsessive_keys(cfg, model) + + logger.info(flatten_dict(OmegaConf.to_container(cfg, resolve=True))) + return cfg, model, loggers + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def train(cfg: DictConfig): + os.chdir(hydra.utils.get_original_cwd()) + cfg, model, loggers = get_parameters(cfg) + callbacks = [] + for cb in cfg.callbacks: + callbacks.append(hydra.utils.instantiate(cb)) + + callbacks.append(RegularCheckpointing()) + + runner = Trainer( + logger=loggers, + gpus=cfg.general.gpus, + callbacks=callbacks, + weights_save_path=str(cfg.general.save_dir), + **cfg.trainer, + ) + runner.fit(model) + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def test(cfg: DictConfig): + # because hydra wants to change dir for some reason + os.chdir(hydra.utils.get_original_cwd()) + cfg, model, loggers = get_parameters(cfg) + runner = Trainer( + gpus=cfg.general.gpus, + logger=loggers, + weights_save_path=str(cfg.general.save_dir), + **cfg.trainer, + ) + runner.test(model) + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def main(cfg: DictConfig): + if cfg["general"]["train_mode"]: + train(cfg) + else: + test(cfg) + + +if __name__ == "__main__": + main() diff --git a/models/Mask3D/build/lib/mask3d/models/__init__.py b/models/Mask3D/build/lib/mask3d/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b092c965bba4c734b49a7f4d2e3ab6fee8471d17 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/__init__.py @@ -0,0 +1,44 @@ +import mask3d.models.resunet as resunet +import mask3d.models.res16unet as res16unet +from mask3d.models.res16unet import ( + Res16UNet34C, + Res16UNet34A, + Res16UNet14A, + Res16UNet34D, + Res16UNet18D, + Res16UNet18B, + Custom30M, +) +from mask3d.models.mask3d import Mask3D + +MODELS = [] + + +def add_models(module): + MODELS.extend([getattr(module, a) for a in dir(module) if "Net" in a]) + + +add_models(resunet) +add_models(res16unet) +add_models(mask3d) + + +def get_models(): + """Returns a tuple of sample models.""" + return MODELS + + +def load_model(name): + """Creates and returns an instance of the model given its class name.""" + # Find the model class from its name + all_models = get_models() + mdict = {model.__name__: model for model in all_models} + if name not in mdict: + print("Invalid model index. Options are:") + # Display a list of valid model names + for model in all_models: + print(f"\t* {model.__name__}") + return None + NetClass = mdict[name] + + return NetClass diff --git a/models/Mask3D/build/lib/mask3d/models/criterion.py b/models/Mask3D/build/lib/mask3d/models/criterion.py new file mode 100644 index 0000000000000000000000000000000000000000..19ce8bc8ecf4a0be08ce91e45857412a8d55efba --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/criterion.py @@ -0,0 +1,343 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py +# Modified for Mask3D +""" +MaskFormer criterion. 
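+Computes Hungarian-matched classification, binary mask (BCE), and dice losses,
+using PointRend-style uncertainty-based point sampling for the mask terms.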
+""" + +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.utils.comm import get_world_size +from detectron2.projects.point_rend.point_features import ( + get_uncertain_point_coords_with_randomness, + point_sample, +) + +from mask3d.models.misc import ( + is_dist_avail_and_initialized, + nested_tensor_from_tensor_list, +) + + +def dice_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + num_masks: float, +): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(-1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_masks + + +dice_loss_jit = torch.jit.script(dice_loss) # type: torch.jit.ScriptModule + + +def sigmoid_ce_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + num_masks: float, +): + """ + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + Returns: + Loss tensor + """ + loss = F.binary_cross_entropy_with_logits( + inputs, targets, reduction="none" + ) + + return loss.mean(1).sum() / num_masks + + +sigmoid_ce_loss_jit = torch.jit.script( + sigmoid_ce_loss +) # type: torch.jit.ScriptModule + + +def calculate_uncertainty(logits): + """ + We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the + foreground class in `classes`. + Args: + logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images and C is + the number of foreground classes. The values are logits. + Returns: + scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + assert logits.shape[1] == 1 + gt_class_logits = logits.clone() + return -(torch.abs(gt_class_logits)) + + +class SetCriterion(nn.Module): + """This class computes the loss for DETR. + The process happens in two steps: + 1) we compute hungarian assignment between ground truth boxes and the outputs of the model + 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) + """ + + def __init__( + self, + num_classes, + matcher, + weight_dict, + eos_coef, + losses, + num_points, + oversample_ratio, + importance_sample_ratio, + class_weights, + ): + """Create the criterion. + Parameters: + num_classes: number of object categories, omitting the special no-object category + matcher: module able to compute a matching between targets and proposals + weight_dict: dict containing as key the names of the losses and as values their relative weight. + eos_coef: relative classification weight applied to the no-object category + losses: list of all the losses to be applied. See get_loss for list of available losses. 
+ """ + super().__init__() + self.num_classes = num_classes - 1 + self.class_weights = class_weights + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + self.losses = losses + empty_weight = torch.ones(self.num_classes + 1) + empty_weight[-1] = self.eos_coef + + if self.class_weights != -1: + assert ( + len(self.class_weights) == self.num_classes + ), "CLASS WEIGHTS DO NOT MATCH" + empty_weight[:-1] = torch.tensor(self.class_weights) + + self.register_buffer("empty_weight", empty_weight) + + # pointwise mask loss parameters + self.num_points = num_points + self.oversample_ratio = oversample_ratio + self.importance_sample_ratio = importance_sample_ratio + + def loss_labels(self, outputs, targets, indices, num_masks, mask_type): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + assert "pred_logits" in outputs + src_logits = outputs["pred_logits"].float() + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat( + [t["labels"][J] for t, (_, J) in zip(targets, indices)] + ) + target_classes = torch.full( + src_logits.shape[:2], + self.num_classes, + dtype=torch.int64, + device=src_logits.device, + ) + target_classes[idx] = target_classes_o + + loss_ce = F.cross_entropy( + src_logits.transpose(1, 2), + target_classes, + self.empty_weight, + ignore_index=253, + ) + losses = {"loss_ce": loss_ce} + return losses + + def loss_masks(self, outputs, targets, indices, num_masks, mask_type): + """Compute the losses related to the masks: the focal loss and the dice loss. + targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + """ + assert "pred_masks" in outputs + + loss_masks = [] + loss_dices = [] + + for batch_id, (map_id, target_id) in enumerate(indices): + map = outputs["pred_masks"][batch_id][:, map_id].T + target_mask = targets[batch_id][mask_type][target_id] + + if self.num_points != -1: + point_idx = torch.randperm( + target_mask.shape[1], device=target_mask.device + )[: int(self.num_points * target_mask.shape[1])] + else: + # sample all points + point_idx = torch.arange( + target_mask.shape[1], device=target_mask.device + ) + + num_masks = target_mask.shape[0] + map = map[:, point_idx] + target_mask = target_mask[:, point_idx].float() + + loss_masks.append(sigmoid_ce_loss_jit(map, target_mask, num_masks)) + loss_dices.append(dice_loss_jit(map, target_mask, num_masks)) + # del target_mask + return { + "loss_mask": torch.sum(torch.stack(loss_masks)), + "loss_dice": torch.sum(torch.stack(loss_dices)), + } + + src_idx = self._get_src_permutation_idx(indices) + tgt_idx = self._get_tgt_permutation_idx(indices) + src_masks = outputs["pred_masks"] + src_masks = src_masks[src_idx] + masks = [t[mask_type] for t in targets] + # TODO use valid to mask invalid areas due to padding in loss + target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() + target_masks = target_masks.to(src_masks) + target_masks = target_masks[tgt_idx] + + # No need to upsample predictions as we are using normalized coordinates :) + # N x 1 x H x W + src_masks = src_masks[:, None] + target_masks = target_masks[:, None] + + with torch.no_grad(): + # sample point_coords + point_coords = get_uncertain_point_coords_with_randomness( + src_masks, + lambda logits: calculate_uncertainty(logits), + self.num_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + # get gt labels + point_labels = point_sample( + target_masks, + 
point_coords, + align_corners=False, + ).squeeze(1) + + point_logits = point_sample( + src_masks, + point_coords, + align_corners=False, + ).squeeze(1) + + losses = { + "loss_mask": sigmoid_ce_loss_jit( + point_logits, point_labels, num_masks, mask_type + ), + "loss_dice": dice_loss_jit( + point_logits, point_labels, num_masks, mask_type + ), + } + + del src_masks + del target_masks + return losses + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat( + [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] + ) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat( + [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)] + ) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def get_loss(self, loss, outputs, targets, indices, num_masks, mask_type): + loss_map = {"labels": self.loss_labels, "masks": self.loss_masks} + assert loss in loss_map, f"do you really want to compute {loss} loss?" + return loss_map[loss](outputs, targets, indices, num_masks, mask_type) + + def forward(self, outputs, targets, mask_type): + """This performs the loss computation. + Parameters: + outputs: dict of tensors, see the output specification of the model for the format + targets: list of dicts, such that len(targets) == batch_size. + The expected keys in each dict depends on the losses applied, see each loss' doc + """ + outputs_without_aux = { + k: v for k, v in outputs.items() if k != "aux_outputs" + } + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets, mask_type) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_masks = sum(len(t["labels"]) for t in targets) + num_masks = torch.as_tensor( + [num_masks], + dtype=torch.float, + device=next(iter(outputs.values())).device, + ) + if is_dist_avail_and_initialized(): + torch.distributed.all_reduce(num_masks) + num_masks = torch.clamp(num_masks / get_world_size(), min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update( + self.get_loss( + loss, outputs, targets, indices, num_masks, mask_type + ) + ) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
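+        # Note: the Hungarian matching is recomputed for every intermediate
+        # decoder layer, and the resulting loss keys get a per-layer suffix
+        # (e.g. "loss_ce_0"), so each auxiliary output is supervised with the
+        # same set of losses as the final prediction.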
+ if "aux_outputs" in outputs: + for i, aux_outputs in enumerate(outputs["aux_outputs"]): + indices = self.matcher(aux_outputs, targets, mask_type) + for loss in self.losses: + l_dict = self.get_loss( + loss, + aux_outputs, + targets, + indices, + num_masks, + mask_type, + ) + l_dict = {k + f"_{i}": v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + def __repr__(self): + head = "Criterion " + self.__class__.__name__ + body = [ + "matcher: {}".format(self.matcher.__repr__(_repr_indent=8)), + "losses: {}".format(self.losses), + "weight_dict: {}".format(self.weight_dict), + "num_classes: {}".format(self.num_classes), + "eos_coef: {}".format(self.eos_coef), + "num_points: {}".format(self.num_points), + "oversample_ratio: {}".format(self.oversample_ratio), + "importance_sample_ratio: {}".format(self.importance_sample_ratio), + ] + _repr_indent = 4 + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) diff --git a/models/Mask3D/build/lib/mask3d/models/mask3d.py b/models/Mask3D/build/lib/mask3d/models/mask3d.py new file mode 100644 index 0000000000000000000000000000000000000000..0e09440cfacc68a961af8231f8205bf1daf6a134 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/mask3d.py @@ -0,0 +1,870 @@ +import torch +import hydra +import torch.nn as nn +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine.MinkowskiPooling import MinkowskiAvgPooling +import numpy as np +from torch.nn import functional as F +from mask3d.models.modules.common import conv +from mask3d.models.position_embedding import PositionEmbeddingCoordsSine +from mask3d.models.modules.helpers_3detr import GenericMLP +from torch_scatter import scatter_mean, scatter_max, scatter_min +from torch.cuda.amp import autocast + +from pointnet2.pointnet2_utils import furthest_point_sample + + +class Mask3D(nn.Module): + def __init__( + self, + config, + hidden_dim, + num_queries, + num_heads, + dim_feedforward, + sample_sizes, + shared_decoder, + num_classes, + num_decoders, + dropout, + pre_norm, + positional_encoding_type, + non_parametric_queries, + train_on_segments, + normalize_pos_enc, + use_level_embed, + scatter_type, + hlevels, + use_np_features, + voxel_size, + max_sample_size, + random_queries, + gauss_scale, + random_query_both, + random_normal, + ): + super().__init__() + self.random_normal = random_normal + self.random_query_both = random_query_both + self.random_queries = random_queries + self.max_sample_size = max_sample_size + self.gauss_scale = gauss_scale + self.voxel_size = voxel_size + self.scatter_type = scatter_type + self.hlevels = hlevels + self.use_level_embed = use_level_embed + self.train_on_segments = train_on_segments + self.normalize_pos_enc = normalize_pos_enc + self.num_decoders = num_decoders + self.num_classes = num_classes + self.dropout = dropout + self.pre_norm = pre_norm + self.shared_decoder = shared_decoder + self.sample_sizes = sample_sizes + self.non_parametric_queries = non_parametric_queries + self.use_np_features = use_np_features + self.mask_dim = hidden_dim + self.num_heads = num_heads + self.num_queries = num_queries + self.pos_enc_type = positional_encoding_type + + self.backbone = hydra.utils.instantiate(config.backbone) + self.num_levels = len(self.hlevels) + sizes = self.backbone.PLANES[-5:] + + self.mask_features_head = conv( + self.backbone.PLANES[7], + self.mask_dim, + kernel_size=1, + stride=1, + bias=True, + D=3, + ) + + if self.scatter_type == "mean": + self.scatter_fn = scatter_mean + elif self.scatter_type 
== "max": + self.scatter_fn = lambda mask, p2s, dim: scatter_max( + mask, p2s, dim=dim + )[0] + else: + assert False, "Scatter function not known" + + assert ( + not use_np_features + ) or non_parametric_queries, "np features only with np queries" + + if self.non_parametric_queries: + self.query_projection = GenericMLP( + input_dim=self.mask_dim, + hidden_dims=[self.mask_dim], + output_dim=self.mask_dim, + use_conv=True, + output_use_activation=True, + hidden_use_bias=True, + ) + + if self.use_np_features: + self.np_feature_projection = nn.Sequential( + nn.Linear(sizes[-1], hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, hidden_dim), + ) + elif self.random_query_both: + self.query_projection = GenericMLP( + input_dim=2 * self.mask_dim, + hidden_dims=[2 * self.mask_dim], + output_dim=2 * self.mask_dim, + use_conv=True, + output_use_activation=True, + hidden_use_bias=True, + ) + else: + # PARAMETRIC QUERIES + # learnable query features + self.query_feat = nn.Embedding(num_queries, hidden_dim) + # learnable query p.e. + self.query_pos = nn.Embedding(num_queries, hidden_dim) + + if self.use_level_embed: + # learnable scale-level embedding + self.level_embed = nn.Embedding(self.num_levels, hidden_dim) + + self.mask_embed_head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, hidden_dim), + ) + + self.class_embed_head = nn.Linear(hidden_dim, self.num_classes) + + if self.pos_enc_type == "legacy": + self.pos_enc = PositionalEncoding3D(channels=self.mask_dim) + elif self.pos_enc_type == "fourier": + self.pos_enc = PositionEmbeddingCoordsSine( + pos_type="fourier", + d_pos=self.mask_dim, + gauss_scale=self.gauss_scale, + normalize=self.normalize_pos_enc, + ) + elif self.pos_enc_type == "sine": + self.pos_enc = PositionEmbeddingCoordsSine( + pos_type="sine", + d_pos=self.mask_dim, + normalize=self.normalize_pos_enc, + ) + else: + assert False, "pos enc type not known" + + self.pooling = MinkowskiAvgPooling( + kernel_size=2, stride=2, dimension=3 + ) + + self.masked_transformer_decoder = nn.ModuleList() + self.cross_attention = nn.ModuleList() + self.self_attention = nn.ModuleList() + self.ffn_attention = nn.ModuleList() + self.lin_squeeze = nn.ModuleList() + + num_shared = self.num_decoders if not self.shared_decoder else 1 + + for _ in range(num_shared): + tmp_cross_attention = nn.ModuleList() + tmp_self_attention = nn.ModuleList() + tmp_ffn_attention = nn.ModuleList() + tmp_squeeze_attention = nn.ModuleList() + for i, hlevel in enumerate(self.hlevels): + tmp_cross_attention.append( + CrossAttentionLayer( + d_model=self.mask_dim, + nhead=self.num_heads, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + tmp_squeeze_attention.append( + nn.Linear(sizes[hlevel], self.mask_dim) + ) + + tmp_self_attention.append( + SelfAttentionLayer( + d_model=self.mask_dim, + nhead=self.num_heads, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + tmp_ffn_attention.append( + FFNLayer( + d_model=self.mask_dim, + dim_feedforward=dim_feedforward, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + self.cross_attention.append(tmp_cross_attention) + self.self_attention.append(tmp_self_attention) + self.ffn_attention.append(tmp_ffn_attention) + self.lin_squeeze.append(tmp_squeeze_attention) + + self.decoder_norm = nn.LayerNorm(hidden_dim) + + def get_pos_encs(self, coords): + pos_encodings_pcd = [] + + for i in range(len(coords)): + pos_encodings_pcd.append([[]]) + for coords_batch in coords[i].decomposed_features: + scene_min 
= coords_batch.min(dim=0)[0][None, ...] + scene_max = coords_batch.max(dim=0)[0][None, ...] + + with autocast(enabled=False): + tmp = self.pos_enc( + coords_batch[None, ...].float(), + input_range=[scene_min, scene_max], + ) + + pos_encodings_pcd[-1][0].append(tmp.squeeze(0).permute((1, 0))) + + return pos_encodings_pcd + + def forward( + self, x, point2segment=None, raw_coordinates=None, is_eval=False + ): + # print(point2segment) + pcd_features, aux = self.backbone(x) + + batch_size = len(x.decomposed_coordinates) + + with torch.no_grad(): + coordinates = me.SparseTensor( + features=raw_coordinates, + coordinate_manager=aux[-1].coordinate_manager, + coordinate_map_key=aux[-1].coordinate_map_key, + device=aux[-1].device, + ) + + coords = [coordinates] + for _ in reversed(range(len(aux) - 1)): + coords.append(self.pooling(coords[-1])) + + coords.reverse() + + pos_encodings_pcd = self.get_pos_encs(coords) + mask_features = self.mask_features_head(pcd_features) + if point2segment is not None: + mask_segments = [] + for i, mask_feature in enumerate( + mask_features.decomposed_features + ): + mask_segments.append( + self.scatter_fn(mask_feature, point2segment[i], dim=0) + ) + + sampled_coords = None + + if self.non_parametric_queries: + fps_idx = [ + furthest_point_sample( + x.decomposed_coordinates[i][None, ...].float(), + self.num_queries, + ) + .squeeze(0) + .long() + for i in range(len(x.decomposed_coordinates)) + ] + + sampled_coords = torch.stack( + [ + coordinates.decomposed_features[i][fps_idx[i].long(), :] + for i in range(len(fps_idx)) + ] + ) + + mins = torch.stack( + [ + coordinates.decomposed_features[i].min(dim=0)[0] + for i in range(len(coordinates.decomposed_features)) + ] + ) + maxs = torch.stack( + [ + coordinates.decomposed_features[i].max(dim=0)[0] + for i in range(len(coordinates.decomposed_features)) + ] + ) + + query_pos = self.pos_enc( + sampled_coords.float(), input_range=[mins, maxs] + ) # Batch, Dim, queries + query_pos = self.query_projection(query_pos) + + if not self.use_np_features: + queries = torch.zeros_like(query_pos).permute((0, 2, 1)) + else: + queries = torch.stack( + [ + pcd_features.decomposed_features[i][ + fps_idx[i].long(), : + ] + for i in range(len(fps_idx)) + ] + ) + queries = self.np_feature_projection(queries) + query_pos = query_pos.permute((2, 0, 1)) + elif self.random_queries: + query_pos = ( + torch.rand( + batch_size, + self.mask_dim, + self.num_queries, + device=x.device, + ) + - 0.5 + ) + + queries = torch.zeros_like(query_pos).permute((0, 2, 1)) + query_pos = query_pos.permute((2, 0, 1)) + elif self.random_query_both: + if not self.random_normal: + query_pos_feat = ( + torch.rand( + batch_size, + 2 * self.mask_dim, + self.num_queries, + device=x.device, + ) + - 0.5 + ) + else: + query_pos_feat = torch.randn( + batch_size, + 2 * self.mask_dim, + self.num_queries, + device=x.device, + ) + + queries = query_pos_feat[:, : self.mask_dim, :].permute((0, 2, 1)) + query_pos = query_pos_feat[:, self.mask_dim :, :].permute( + (2, 0, 1) + ) + else: + # PARAMETRIC QUERIES + queries = self.query_feat.weight.unsqueeze(0).repeat( + batch_size, 1, 1 + ) + query_pos = self.query_pos.weight.unsqueeze(1).repeat( + 1, batch_size, 1 + ) + + predictions_class = [] + predictions_mask = [] + + for decoder_counter in range(self.num_decoders): + if self.shared_decoder: + decoder_counter = 0 + for i, hlevel in enumerate(self.hlevels): + if point2segment is not None: + output_class, outputs_mask, attn_mask = self.mask_module( + queries, + mask_features, + 
mask_segments, + len(aux) - hlevel - 1, + ret_attn_mask=True, + point2segment=point2segment, + coords=coords, + ) + else: + output_class, outputs_mask, attn_mask = self.mask_module( + queries, + mask_features, + None, + len(aux) - hlevel - 1, + ret_attn_mask=True, + point2segment=None, + coords=coords, + ) + + decomposed_aux = aux[hlevel].decomposed_features + decomposed_attn = attn_mask.decomposed_features + + curr_sample_size = max( + [pcd.shape[0] for pcd in decomposed_aux] + ) + + if min([pcd.shape[0] for pcd in decomposed_aux]) == 1: + raise RuntimeError( + "only a single point gives nans in cross-attention" + ) + + if not (self.max_sample_size or is_eval): + curr_sample_size = min( + curr_sample_size, self.sample_sizes[hlevel] + ) + + rand_idx = [] + mask_idx = [] + for k in range(len(decomposed_aux)): + pcd_size = decomposed_aux[k].shape[0] + if pcd_size <= curr_sample_size: + # we do not need to sample + # take all points and pad the rest with zeroes and mask it + idx = torch.zeros( + curr_sample_size, + dtype=torch.long, + device=queries.device, + ) + + midx = torch.ones( + curr_sample_size, + dtype=torch.bool, + device=queries.device, + ) + + idx[:pcd_size] = torch.arange( + pcd_size, device=queries.device + ) + + midx[:pcd_size] = False # attend to first points + else: + # we have more points in pcd as we like to sample + # take a subset (no padding or masking needed) + idx = torch.randperm( + decomposed_aux[k].shape[0], device=queries.device + )[:curr_sample_size] + midx = torch.zeros( + curr_sample_size, + dtype=torch.bool, + device=queries.device, + ) # attend to all + + rand_idx.append(idx) + mask_idx.append(midx) + + batched_aux = torch.stack( + [ + decomposed_aux[k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_attn = torch.stack( + [ + decomposed_attn[k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_pos_enc = torch.stack( + [ + pos_encodings_pcd[hlevel][0][k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_attn.permute((0, 2, 1))[ + batched_attn.sum(1) == rand_idx[0].shape[0] + ] = False + + m = torch.stack(mask_idx) + batched_attn = torch.logical_or(batched_attn, m[..., None]) + + src_pcd = self.lin_squeeze[decoder_counter][i]( + batched_aux.permute((1, 0, 2)) + ) + if self.use_level_embed: + src_pcd += self.level_embed.weight[i] + + output = self.cross_attention[decoder_counter][i]( + queries.permute((1, 0, 2)), + src_pcd, + memory_mask=batched_attn.repeat_interleave( + self.num_heads, dim=0 + ).permute((0, 2, 1)), + memory_key_padding_mask=None, # here we do not apply masking on padded region + pos=batched_pos_enc.permute((1, 0, 2)), + query_pos=query_pos, + ) + + output = self.self_attention[decoder_counter][i]( + output, + tgt_mask=None, + tgt_key_padding_mask=None, + query_pos=query_pos, + ) + + # FFN + queries = self.ffn_attention[decoder_counter][i]( + output + ).permute((1, 0, 2)) + + predictions_class.append(output_class) + predictions_mask.append(outputs_mask) + + if point2segment is not None: + output_class, outputs_mask = self.mask_module( + queries, + mask_features, + mask_segments, + 0, + ret_attn_mask=False, + point2segment=point2segment, + coords=coords, + ) + else: + output_class, outputs_mask = self.mask_module( + queries, + mask_features, + None, + 0, + ret_attn_mask=False, + point2segment=None, + coords=coords, + ) + predictions_class.append(output_class) + predictions_mask.append(outputs_mask) + + return { + "pred_logits": predictions_class[-1], + "pred_masks": predictions_mask[-1], + 
"aux_outputs": self._set_aux_loss( + predictions_class, predictions_mask + ), + "sampled_coords": sampled_coords.detach().cpu().numpy() + if sampled_coords is not None + else None, + "backbone_features": pcd_features, + } + + def mask_module( + self, + query_feat, + mask_features, + mask_segments, + num_pooling_steps, + ret_attn_mask=True, + point2segment=None, + coords=None, + ): + query_feat = self.decoder_norm(query_feat) + mask_embed = self.mask_embed_head(query_feat) + outputs_class = self.class_embed_head(query_feat) + + output_masks = [] + + if point2segment is not None: + output_segments = [] + for i in range(len(mask_segments)): + output_segments.append(mask_segments[i] @ mask_embed[i].T) + output_masks.append(output_segments[-1][point2segment[i]]) + else: + for i in range(mask_features.C[-1, 0] + 1): + output_masks.append( + mask_features.decomposed_features[i] @ mask_embed[i].T + ) + + output_masks = torch.cat(output_masks) + outputs_mask = me.SparseTensor( + features=output_masks, + coordinate_manager=mask_features.coordinate_manager, + coordinate_map_key=mask_features.coordinate_map_key, + ) + + if ret_attn_mask: + attn_mask = outputs_mask + for _ in range(num_pooling_steps): + attn_mask = self.pooling(attn_mask.float()) + + attn_mask = me.SparseTensor( + features=(attn_mask.F.detach().sigmoid() < 0.5), + coordinate_manager=attn_mask.coordinate_manager, + coordinate_map_key=attn_mask.coordinate_map_key, + ) + + if point2segment is not None: + return outputs_class, output_segments, attn_mask + else: + return ( + outputs_class, + outputs_mask.decomposed_features, + attn_mask, + ) + + if point2segment is not None: + return outputs_class, output_segments + else: + return outputs_class, outputs_mask.decomposed_features + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_seg_masks): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [ + {"pred_logits": a, "pred_masks": b} + for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) + ] + + +class PositionalEncoding3D(nn.Module): + def __init__(self, channels): + """ + :param channels: The last dimension of the tensor you want to apply pos emb to. 
+ """ + self.orig_ch = channels + super(PositionalEncoding3D, self).__init__() + channels = int(np.ceil(channels / 6) * 2) + if channels % 2: + channels += 1 + self.channels = channels + inv_freq = 1.0 / ( + 10000 ** (torch.arange(0, channels, 2).float() / channels) + ) + self.register_buffer("inv_freq", inv_freq) + + def forward(self, tensor, input_range=None): + """ + :param tensor: A 5d tensor of size (batch_size, x, y, z, ch) + :return: Positional Encoding Matrix of size (batch_size, x, y, z, ch) + """ + pos_x, pos_y, pos_z = tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2] + sin_inp_x = torch.einsum("bi,j->bij", pos_x, self.inv_freq) + sin_inp_y = torch.einsum("bi,j->bij", pos_y, self.inv_freq) + sin_inp_z = torch.einsum("bi,j->bij", pos_z, self.inv_freq) + emb_x = torch.cat((sin_inp_x.sin(), sin_inp_x.cos()), dim=-1) + + emb_y = torch.cat((sin_inp_y.sin(), sin_inp_y.cos()), dim=-1) + emb_z = torch.cat((sin_inp_z.sin(), sin_inp_z.cos()), dim=-1) + + emb = torch.cat((emb_x, emb_y, emb_z), dim=-1) + return emb[:, :, : self.orig_ch].permute((0, 2, 1)) + + +class SelfAttentionLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn( + q, + k, + value=tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + tgt2 = self.norm(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn( + q, + k, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + if self.normalize_before: + return self.forward_pre( + tgt, tgt_mask, tgt_key_padding_mask, query_pos + ) + return self.forward_post( + tgt, tgt_mask, tgt_key_padding_mask, query_pos + ) + + +class CrossAttentionLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.multihead_attn = nn.MultiheadAttention( + d_model, nhead, dropout=dropout + ) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt, query_pos), + 
key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + tgt2 = self.norm(tgt) + + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + if self.normalize_before: + return self.forward_pre( + tgt, + memory, + memory_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + return self.forward_post( + tgt, memory, memory_mask, memory_key_padding_mask, pos, query_pos + ) + + +class FFNLayer(nn.Module): + def __init__( + self, + d_model, + dim_feedforward=2048, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm = nn.LayerNorm(d_model) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt): + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + return tgt + + def forward_pre(self, tgt): + tgt2 = self.norm(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout(tgt2) + return tgt + + def forward(self, tgt): + if self.normalize_before: + return self.forward_pre(tgt) + return self.forward_post(tgt) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") diff --git a/models/Mask3D/build/lib/mask3d/models/matcher.py b/models/Mask3D/build/lib/mask3d/models/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0e7a05bb76a078b1c3c3b9c877054e439b584c --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/matcher.py @@ -0,0 +1,226 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py +""" +Modules to compute the matching cost and solve the corresponding LSAP. +""" +import torch +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment +from torch import nn +from torch.cuda.amp import autocast + +from detectron2.projects.point_rend.point_features import point_sample + + +def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. 
Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets) + denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :] + loss = 1 - (numerator + 1) / (denominator + 1) + return loss + + +batch_dice_loss_jit = torch.jit.script( + batch_dice_loss +) # type: torch.jit.ScriptModule + + +def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + Returns: + Loss tensor + """ + hw = inputs.shape[1] + + pos = F.binary_cross_entropy_with_logits( + inputs, torch.ones_like(inputs), reduction="none" + ) + neg = F.binary_cross_entropy_with_logits( + inputs, torch.zeros_like(inputs), reduction="none" + ) + + loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum( + "nc,mc->nm", neg, (1 - targets) + ) + + return loss / hw + + +batch_sigmoid_ce_loss_jit = torch.jit.script( + batch_sigmoid_ce_loss +) # type: torch.jit.ScriptModule + + +class HungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). + """ + + def __init__( + self, + cost_class: float = 1, + cost_mask: float = 1, + cost_dice: float = 1, + num_points: int = 0, + ): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost + cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_mask = cost_mask + self.cost_dice = cost_dice + + assert ( + cost_class != 0 or cost_mask != 0 or cost_dice != 0 + ), "all costs cant be 0" + + self.num_points = num_points + + @torch.no_grad() + def memory_efficient_forward(self, outputs, targets, mask_type): + """More memory-friendly matching""" + bs, num_queries = outputs["pred_logits"].shape[:2] + + indices = [] + + # Iterate through batch size + for b in range(bs): + + out_prob = outputs["pred_logits"][b].softmax( + -1 + ) # [num_queries, num_classes] + tgt_ids = targets[b]["labels"].clone() + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + filter_ignore = tgt_ids == 253 + tgt_ids[filter_ignore] = 0 + cost_class = -out_prob[:, tgt_ids] + cost_class[ + :, filter_ignore + ] = ( + -1.0 + ) # for ignore classes pretend perfect match ;) TODO better worst class match? 
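+            # The pairwise BCE and dice costs below are evaluated on the shared
+            # random point subset `point_idx` (all points when num_points == -1),
+            # which keeps the per-pair cost computation tractable before the
+            # assignment is solved with scipy's linear_sum_assignment.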
+ + out_mask = outputs["pred_masks"][ + b + ].T # [num_queries, H_pred, W_pred] + # gt masks are already padded when preparing target + tgt_mask = targets[b][mask_type].to(out_mask) + + if self.num_points != -1: + point_idx = torch.randperm( + tgt_mask.shape[1], device=tgt_mask.device + )[: int(self.num_points * tgt_mask.shape[1])] + # point_idx = torch.randint(0, tgt_mask.shape[1], size=(self.num_points,), device=tgt_mask.device) + else: + # sample all points + point_idx = torch.arange( + tgt_mask.shape[1], device=tgt_mask.device + ) + + # out_mask = out_mask[:, None] + # tgt_mask = tgt_mask[:, None] + # all masks share the same set of points for efficient matching! + # point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device) + # get gt labels + # tgt_mask = point_sample( + # tgt_mask, + # point_coords.repeat(tgt_mask.shape[0], 1, 1), + # align_corners=False, + # ).squeeze(1) + + # out_mask = point_sample( + # out_mask, + # point_coords.repeat(out_mask.shape[0], 1, 1), + # align_corners=False, + # ).squeeze(1) + + with autocast(enabled=False): + out_mask = out_mask.float() + tgt_mask = tgt_mask.float() + # Compute the focal loss between masks + cost_mask = batch_sigmoid_ce_loss_jit( + out_mask[:, point_idx], tgt_mask[:, point_idx] + ) + + # Compute the dice loss betwen masks + cost_dice = batch_dice_loss_jit( + out_mask[:, point_idx], tgt_mask[:, point_idx] + ) + + # Final cost matrix + C = ( + self.cost_mask * cost_mask + + self.cost_class * cost_class + + self.cost_dice * cost_dice + ) + C = C.reshape(num_queries, -1).cpu() + + indices.append(linear_sum_assignment(C)) + + return [ + ( + torch.as_tensor(i, dtype=torch.int64), + torch.as_tensor(j, dtype=torch.int64), + ) + for i, j in indices + ] + + @torch.no_grad() + def forward(self, outputs, targets, mask_type): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + return self.memory_efficient_forward(outputs, targets, mask_type) + + def __repr__(self, _repr_indent=4): + head = "Matcher " + self.__class__.__name__ + body = [ + "cost_class: {}".format(self.cost_class), + "cost_mask: {}".format(self.cost_mask), + "cost_dice: {}".format(self.cost_dice), + ] + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) diff --git a/models/Mask3D/build/lib/mask3d/models/metrics/__init__.py b/models/Mask3D/build/lib/mask3d/models/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd7538b5868b93e4192dbee9ca0da9e91323cf0f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/metrics/__init__.py @@ -0,0 +1,4 @@ +from .confusionmatrix import 
ConfusionMatrix +from .metrics import IoU + +__all__ = ["ConfusionMatrix", "IoU"] diff --git a/models/Mask3D/build/lib/mask3d/models/metrics/confusionmatrix.py b/models/Mask3D/build/lib/mask3d/models/metrics/confusionmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..2d92f12595d26f76f3c26d18550b1b1486b837ff --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/metrics/confusionmatrix.py @@ -0,0 +1,107 @@ +import numpy as np +import torch + + +class ConfusionMatrix: + """Constructs a confusion matrix for a multi-class classification problems. + + Does not support multi-label, multi-class problems. + + Keyword arguments: + - num_classes (int): number of classes in the classification problem. + - normalized (boolean, optional): Determines whether or not the confusion + matrix is normalized or not. Default: False. + + Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py + """ + + def __init__(self, num_classes, ignore_label): + super().__init__() + + self.conf = np.ndarray((num_classes, num_classes), dtype=np.int32) + self.ignore_label = ignore_label + self.num_classes = num_classes + self.reset() + + def reset(self): + self.conf.fill(0) + + def add(self, predicted, target): + """Computes the confusion matrix + + The shape of the confusion matrix is K x K, where K is the number + of classes. + + Keyword arguments: + - predicted (Tensor or numpy.ndarray): Can be an N x K tensor/array of + predicted scores obtained from the model for N examples and K classes, + or an N-tensor/array of integer values between 0 and K-1. + - target (Tensor or numpy.ndarray): Can be an N x K tensor/array of + ground-truth classes for N examples and K classes, or an N-tensor/array + of integer values between 0 and K-1. 
+ + """ + # _, predicted = predicted.max(1) + + # predicted = predicted.view(-1) + # target = target.view(-1) + + # If target and/or predicted are tensors, convert them to numpy arrays + if torch.is_tensor(predicted): + predicted = predicted.cpu().numpy() + if torch.is_tensor(target): + target = target.cpu().numpy() + ind = ~np.isin(target, self.ignore_label) + predicted, target = predicted[ind], target[ind] + + assert ( + predicted.shape[0] == target.shape[0] + ), "number of targets and predicted outputs do not match" + + if np.ndim(predicted) != 1: + assert ( + predicted.shape[1] == self.num_classes + ), "number of predictions does not match size of confusion matrix" + predicted = np.argmax(predicted, 1) + else: + assert (predicted.max() < self.num_classes) and ( + predicted.min() >= 0 + ), "predicted values are not between 0 and k-1" + + if np.ndim(target) != 1: + assert ( + target.shape[1] == self.num_classes + ), "Onehot target does not match size of confusion matrix" + assert (target >= 0).all() and ( + target <= 1 + ).all(), "in one-hot encoding, target values should be 0 or 1" + assert ( + target.sum(1) == 1 + ).all(), "multi-label setting is not supported" + target = np.argmax(target, 1) + else: + assert (target.max() < self.num_classes) and ( + target.min() >= 0 + ), "target values are not between 0 and k-1" + + # hack for bincounting 2 arrays together + x = predicted + self.num_classes * target + bincount_2d = np.bincount( + x.astype(np.int32), minlength=self.num_classes**2 + ) + assert bincount_2d.size == self.num_classes**2 + conf = bincount_2d.reshape((self.num_classes, self.num_classes)) + + self.conf += conf + + def value(self, normalized=False): + """ + Returns: + Confustion matrix of K rows and K columns, where rows corresponds + to ground-truth targets and columns corresponds to predicted + targets. + """ + if normalized: + conf = self.conf.astype(np.float32) + return conf / conf.sum(1).clip(min=1e-12)[:, None] + return self.conf diff --git a/models/Mask3D/build/lib/mask3d/models/metrics/metrics.py b/models/Mask3D/build/lib/mask3d/models/metrics/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..f3f4b0ca4f7b0c5224ea242f459374a28485539f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/metrics/metrics.py @@ -0,0 +1,48 @@ +import numpy as np + + +class IoU: + """Computes the intersection over union (IoU) per class and corresponding + mean (mIoU). + + Intersection over union (IoU) is a common evaluation metric for semantic + segmentation. The predictions are first accumulated in a confusion matrix + and the IoU is computed from it as follows: + + IoU = true_positive / (true_positive + false_positive + false_negative). + + Keyword arguments: + - num_classes (int): number of classes in the classification problem + - normalized (boolean, optional): Determines whether or not the confusion + matrix is normalized or not. Default: False. + - ignore_index (int or iterable, optional): Index of the classes to ignore + when computing the IoU. Can be an int, or any iterable of ints. + + Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter + + """ + + def __init__(self): + super().__init__() + + def value(self, conf_matrix): + """Computes the IoU and mean IoU. + + The mean computation ignores NaN elements of the IoU array. + + Returns: + Tuple: (IoU, mIoU). The first output is the per class IoU, + for K classes it's numpy.ndarray with K elements. The second output, + is the mean IoU. 
+ """ + true_positive = np.diag(conf_matrix) + false_positive = np.sum(conf_matrix, 0) - true_positive + false_negative = np.sum(conf_matrix, 1) - true_positive + + # Just in case we get a division by 0, ignore/hide the error + with np.errstate(divide="ignore", invalid="ignore"): + iou = true_positive / ( + true_positive + false_positive + false_negative + ) + + return iou diff --git a/models/Mask3D/build/lib/mask3d/models/misc.py b/models/Mask3D/build/lib/mask3d/models/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..8416b62804fbc002bd02a457d896276bc307b070 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/misc.py @@ -0,0 +1,119 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. +""" +from typing import List, Optional + +import torch +import torch.distributed as dist +import torchvision +from torch import Tensor + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + if torchvision._is_tracing(): + # nested_tensor_from_tensor_list() does not export well to ONNX + # call _onnx_nested_tensor_from_tensor_list() instead + return _onnx_nested_tensor_from_tensor_list(tensor_list) + + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("not supported") + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
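+# It builds the padded batch with torch.nn.functional.pad instead of the
+# in-place slice copies used above, which ONNX export does not support.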
+@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list( + tensor_list: List[Tensor], +) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max( + torch.stack([img.shape[i] for img in tensor_list]).to( + torch.float32 + ) + ).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad( + img, (0, padding[2], 0, padding[1], 0, padding[0]) + ) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad( + m, (0, padding[2], 0, padding[1]), "constant", 1 + ) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True diff --git a/models/Mask3D/build/lib/mask3d/models/model.py b/models/Mask3D/build/lib/mask3d/models/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d167fa58358f2c1a7ca4a509e38c61906e9dd7ac --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/model.py @@ -0,0 +1,27 @@ +from MinkowskiEngine import MinkowskiNetwork + + +class Model(MinkowskiNetwork): + """ + Base network for all sparse convnet + + By default, all networks are segmentation networks. + """ + + OUT_PIXEL_DIST = -1 + + def __init__(self, in_channels, out_channels, config, D, **kwargs): + super().__init__(D) + self.in_channels = in_channels + self.out_channels = out_channels + self.config = config + + +class HighDimensionalModel(Model): + """ + Base network for all spatio (temporal) chromatic sparse convnet + """ + + def __init__(self, in_channels, out_channels, config, D, **kwargs): + assert D > 4, "Num dimension smaller than 5" + super().__init__(in_channels, out_channels, config, D, **kwargs) diff --git a/models/Mask3D/build/lib/mask3d/models/modules/3detr_helpers.py b/models/Mask3D/build/lib/mask3d/models/modules/3detr_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3f7ea57c0266a9781cdfec9f59896d15750a9d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/modules/3detr_helpers.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
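+# 3DETR-style transformer helpers (BatchNormDim1Swap, GenericMLP); an identical
+# copy exists in helpers_3detr.py, which Mask3D imports for its query projection.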
+import torch.nn as nn +from functools import partial +import copy + + +class BatchNormDim1Swap(nn.BatchNorm1d): + """ + Used for nn.Transformer that uses a HW x N x C rep + """ + + def forward(self, x): + """ + x: HW x N x C + permute to N x C x HW + Apply BN on C + permute back + """ + hw, n, c = x.shape + x = x.permute(1, 2, 0) + x = super(BatchNormDim1Swap, self).forward(x) + # x: n x c x hw -> hw x n x c + x = x.permute(2, 0, 1) + return x + + +NORM_DICT = { + "bn": BatchNormDim1Swap, + "bn1d": nn.BatchNorm1d, + "id": nn.Identity, + "ln": nn.LayerNorm, +} + +ACTIVATION_DICT = { + "relu": nn.ReLU, + "gelu": nn.GELU, + "leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1), +} + +WEIGHT_INIT_DICT = { + "xavier_uniform": nn.init.xavier_uniform_, +} + + +class GenericMLP(nn.Module): + def __init__( + self, + input_dim, + hidden_dims, + output_dim, + norm_fn_name=None, + activation="relu", + use_conv=False, + dropout=None, + hidden_use_bias=False, + output_use_bias=True, + output_use_activation=False, + output_use_norm=False, + weight_init_name=None, + ): + super().__init__() + activation = ACTIVATION_DICT[activation] + norm = None + if norm_fn_name is not None: + norm = NORM_DICT[norm_fn_name] + if norm_fn_name == "ln" and use_conv: + norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm + + if dropout is not None: + if not isinstance(dropout, list): + dropout = [dropout for _ in range(len(hidden_dims))] + + layers = [] + prev_dim = input_dim + for idx, x in enumerate(hidden_dims): + if use_conv: + layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias) + else: + layer = nn.Linear(prev_dim, x, bias=hidden_use_bias) + layers.append(layer) + if norm: + layers.append(norm(x)) + layers.append(activation()) + if dropout is not None: + layers.append(nn.Dropout(p=dropout[idx])) + prev_dim = x + if use_conv: + layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias) + else: + layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias) + layers.append(layer) + + if output_use_norm: + layers.append(norm(output_dim)) + + if output_use_activation: + layers.append(activation()) + + self.layers = nn.Sequential(*layers) + + if weight_init_name is not None: + self.do_weight_init(weight_init_name) + + def do_weight_init(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for (_, param) in self.named_parameters(): + if param.dim() > 1: # skips batchnorm/layernorm + func(param) + + def forward(self, x): + output = self.layers(x) + return output + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) diff --git a/models/Mask3D/build/lib/mask3d/models/modules/__init__.py b/models/Mask3D/build/lib/mask3d/models/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/models/modules/common.py b/models/Mask3D/build/lib/mask3d/models/modules/common.py new file mode 100644 index 0000000000000000000000000000000000000000..ae78b5b301cfd6ffcfc3417b543ebe2289602fb7 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/modules/common.py @@ -0,0 +1,275 @@ +import sys + +if sys.version_info[:2] >= (3, 8): + from collections.abc import Sequence +else: + from collections import Sequence + +from enum import Enum + +import torch.nn as nn +import MinkowskiEngine as ME + + +class NormType(Enum): + BATCH_NORM = 0 + INSTANCE_NORM = 1 + INSTANCE_BATCH_NORM = 2 + + +def get_norm(norm_type, n_channels, D, bn_momentum=0.1): + if 
norm_type == NormType.BATCH_NORM: + return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum) + elif norm_type == NormType.INSTANCE_NORM: + return ME.MinkowskiInstanceNorm(n_channels) + elif norm_type == NormType.INSTANCE_BATCH_NORM: + return nn.Sequential( + ME.MinkowskiInstanceNorm(n_channels), + ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum), + ) + else: + raise ValueError(f"Norm type: {norm_type} not supported") + + +class ConvType(Enum): + """ + Define the kernel region type + """ + + HYPERCUBE = 0, "HYPERCUBE" + SPATIAL_HYPERCUBE = 1, "SPATIAL_HYPERCUBE" + SPATIO_TEMPORAL_HYPERCUBE = 2, "SPATIO_TEMPORAL_HYPERCUBE" + HYPERCROSS = 3, "HYPERCROSS" + SPATIAL_HYPERCROSS = 4, "SPATIAL_HYPERCROSS" + SPATIO_TEMPORAL_HYPERCROSS = 5, "SPATIO_TEMPORAL_HYPERCROSS" + SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = ( + 6, + "SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ", + ) + + def __new__(cls, value, name): + member = object.__new__(cls) + member._value_ = value + member.fullname = name + return member + + def __int__(self): + return self.value + + +# Convert the ConvType var to a RegionType var +conv_to_region_type = { + # kernel_size = [k, k, k, 1] + ConvType.HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CUBE, # JONAS CHANGE from HYBRID +} + +# int_to_region_type = {m.value: m for m in ME.RegionType} +int_to_region_type = {m: ME.RegionType(m) for m in range(3)} + + +def convert_region_type(region_type): + """ + Convert the integer region_type to the corresponding RegionType enum object. 
+ """ + return int_to_region_type[region_type] + + +def convert_conv_type(conv_type, kernel_size, D): + assert isinstance(conv_type, ConvType), "conv_type must be of ConvType" + region_type = conv_to_region_type[conv_type] + axis_types = None + if conv_type == ConvType.SPATIAL_HYPERCUBE: + # No temporal convolution + if isinstance(kernel_size, Sequence): + kernel_size = kernel_size[:3] + else: + kernel_size = [ + kernel_size, + ] * 3 + if D == 4: + kernel_size.append(1) + elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE: + # conv_type conversion already handled + assert D == 4 + elif conv_type == ConvType.HYPERCUBE: + # conv_type conversion already handled + pass + elif conv_type == ConvType.SPATIAL_HYPERCROSS: + if isinstance(kernel_size, Sequence): + kernel_size = kernel_size[:3] + else: + kernel_size = [ + kernel_size, + ] * 3 + if D == 4: + kernel_size.append(1) + elif conv_type == ConvType.HYPERCROSS: + # conv_type conversion already handled + pass + elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS: + # conv_type conversion already handled + assert D == 4 + elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: + # Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim + axis_types = [ + ME.RegionType.HYPER_CUBE, + ] * 3 + if D == 4: + axis_types.append(ME.RegionType.HYPER_CROSS) + return region_type, axis_types, kernel_size + + +def conv( + in_planes, + out_planes, + kernel_size, + stride=1, + dilation=1, + bias=False, + conv_type=ConvType.HYPERCUBE, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=None, # axis_types JONAS + dimension=D, + ) + + return ME.MinkowskiConvolution( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + bias=bias, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def conv_tr( + in_planes, + out_planes, + kernel_size, + upsample_stride=1, + dilation=1, + bias=False, + conv_type=ConvType.HYPERCUBE, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + upsample_stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiConvolutionTranspose( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=kernel_size, + stride=upsample_stride, + dilation=dilation, + bias=bias, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def avg_pool( + kernel_size, + stride=1, + dilation=1, + conv_type=ConvType.HYPERCUBE, + in_coords_key=None, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiAvgPooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def avg_unpool( + kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1 +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + 
conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiAvgUnpooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def sum_pool( + kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1 +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiSumPooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) diff --git a/models/Mask3D/build/lib/mask3d/models/modules/helpers_3detr.py b/models/Mask3D/build/lib/mask3d/models/modules/helpers_3detr.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3f7ea57c0266a9781cdfec9f59896d15750a9d --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/modules/helpers_3detr.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch.nn as nn +from functools import partial +import copy + + +class BatchNormDim1Swap(nn.BatchNorm1d): + """ + Used for nn.Transformer that uses a HW x N x C rep + """ + + def forward(self, x): + """ + x: HW x N x C + permute to N x C x HW + Apply BN on C + permute back + """ + hw, n, c = x.shape + x = x.permute(1, 2, 0) + x = super(BatchNormDim1Swap, self).forward(x) + # x: n x c x hw -> hw x n x c + x = x.permute(2, 0, 1) + return x + + +NORM_DICT = { + "bn": BatchNormDim1Swap, + "bn1d": nn.BatchNorm1d, + "id": nn.Identity, + "ln": nn.LayerNorm, +} + +ACTIVATION_DICT = { + "relu": nn.ReLU, + "gelu": nn.GELU, + "leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1), +} + +WEIGHT_INIT_DICT = { + "xavier_uniform": nn.init.xavier_uniform_, +} + + +class GenericMLP(nn.Module): + def __init__( + self, + input_dim, + hidden_dims, + output_dim, + norm_fn_name=None, + activation="relu", + use_conv=False, + dropout=None, + hidden_use_bias=False, + output_use_bias=True, + output_use_activation=False, + output_use_norm=False, + weight_init_name=None, + ): + super().__init__() + activation = ACTIVATION_DICT[activation] + norm = None + if norm_fn_name is not None: + norm = NORM_DICT[norm_fn_name] + if norm_fn_name == "ln" and use_conv: + norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm + + if dropout is not None: + if not isinstance(dropout, list): + dropout = [dropout for _ in range(len(hidden_dims))] + + layers = [] + prev_dim = input_dim + for idx, x in enumerate(hidden_dims): + if use_conv: + layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias) + else: + layer = nn.Linear(prev_dim, x, bias=hidden_use_bias) + layers.append(layer) + if norm: + layers.append(norm(x)) + layers.append(activation()) + if dropout is not None: + layers.append(nn.Dropout(p=dropout[idx])) + prev_dim = x + if use_conv: + layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias) + else: + layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias) + layers.append(layer) + + if output_use_norm: + layers.append(norm(output_dim)) + + if output_use_activation: + layers.append(activation()) + + self.layers = nn.Sequential(*layers) + + if weight_init_name is not None: + self.do_weight_init(weight_init_name) + + def 
do_weight_init(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for (_, param) in self.named_parameters(): + if param.dim() > 1: # skips batchnorm/layernorm + func(param) + + def forward(self, x): + output = self.layers(x) + return output + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) diff --git a/models/Mask3D/build/lib/mask3d/models/modules/resnet_block.py b/models/Mask3D/build/lib/mask3d/models/modules/resnet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..ac16b72aa198964e343f57ad4f79193a22e830dc --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/modules/resnet_block.py @@ -0,0 +1,157 @@ +import torch.nn as nn +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.modules.common import ConvType, NormType, conv, get_norm + + +class BasicBlockBase(nn.Module): + expansion = 1 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + + self.conv1 = conv( + inplanes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm1 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=1, + dilation=dilation, + bias=False, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicBlock(BasicBlockBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BasicBlockIN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class BasicBlockINBN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + + +class BottleneckBase(nn.Module): + expansion = 4 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + self.conv1 = conv(inplanes, planes, kernel_size=1, D=D) + self.norm1 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + + self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D) + self.norm3 = get_norm( + self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum + ) + + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(BottleneckBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BottleneckIN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class 
BottleneckINBN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM diff --git a/models/Mask3D/build/lib/mask3d/models/modules/senet_block.py b/models/Mask3D/build/lib/mask3d/models/modules/senet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..130082738505c79d5ecddb010595a5a66b9d8509 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/modules/senet_block.py @@ -0,0 +1,138 @@ +import torch.nn as nn +import MinkowskiEngine as ME + +from mix3d.models.modules.common import ConvType, NormType +from mix3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=16, D=-1): + # Global coords does not require coords_key + super().__init__() + self.fc = nn.Sequential( + ME.MinkowskiLinear(channel, channel // reduction), + ME.MinkowskiReLU(inplace=True), + ME.MinkowskiLinear(channel // reduction, channel), + ME.MinkowskiSigmoid(), + ) + self.pooling = ME.MinkowskiGlobalPooling(dimension=D) + self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D) + + def forward(self, x): + y = self.pooling(x) + y = self.fc(y) + return self.broadcast_mul(x, y) + + +class SEBasicBlock(BasicBlock): + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + reduction=16, + D=-1, + ): + super().__init__( + inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=conv_type, + D=D, + ) + self.se = SELayer(planes, reduction=reduction, D=D) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBasicBlockSN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_SWITCH_NORM + + +class SEBasicBlockIN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_INSTANCE_NORM + + +class SEBasicBlockLN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_LAYER_NORM + + +class SEBottleneck(Bottleneck): + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + D=3, + reduction=16, + ): + super().__init__( + inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=conv_type, + D=D, + ) + self.se = SELayer(planes * self.expansion, reduction=reduction, D=D) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBottleneckSN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_SWITCH_NORM + + +class SEBottleneckIN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_INSTANCE_NORM + + +class SEBottleneckLN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_LAYER_NORM diff --git a/models/Mask3D/build/lib/mask3d/models/position_embedding.py b/models/Mask3D/build/lib/mask3d/models/position_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..70275f1610e1d3f5ec8d11d18d298b7877204b86 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/position_embedding.py @@ -0,0 +1,179 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +""" +Various positional encodings for the transformer. +""" +import math +import torch +from torch import nn +import numpy as np + +# from utils.pc_util import shift_scale_points + + +def shift_scale_points(pred_xyz, src_range, dst_range=None): + """ + pred_xyz: B x N x 3 + src_range: [[B x 3], [B x 3]] - min and max XYZ coords + dst_range: [[B x 3], [B x 3]] - min and max XYZ coords + """ + if dst_range is None: + dst_range = [ + torch.zeros( + (src_range[0].shape[0], 3), device=src_range[0].device + ), + torch.ones((src_range[0].shape[0], 3), device=src_range[0].device), + ] + + if pred_xyz.ndim == 4: + src_range = [x[:, None] for x in src_range] + dst_range = [x[:, None] for x in dst_range] + + assert src_range[0].shape[0] == pred_xyz.shape[0] + assert dst_range[0].shape[0] == pred_xyz.shape[0] + assert src_range[0].shape[-1] == pred_xyz.shape[-1] + assert src_range[0].shape == src_range[1].shape + assert dst_range[0].shape == dst_range[1].shape + assert src_range[0].shape == dst_range[1].shape + + src_diff = src_range[1][:, None, :] - src_range[0][:, None, :] + dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :] + prop_xyz = ( + ((pred_xyz - src_range[0][:, None, :]) * dst_diff) / src_diff + ) + dst_range[0][:, None, :] + return prop_xyz + + +class PositionEmbeddingCoordsSine(nn.Module): + def __init__( + self, + temperature=10000, + normalize=False, + scale=None, + pos_type="fourier", + d_pos=None, + d_in=3, + gauss_scale=1.0, + ): + super().__init__() + self.d_pos = d_pos + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + assert pos_type in ["sine", "fourier"] + self.pos_type = pos_type + self.scale = scale + if pos_type == "fourier": + assert d_pos is not None + assert d_pos % 2 == 0 + # define a gaussian matrix input_ch -> output_ch + B = torch.empty((d_in, d_pos // 2)).normal_() + B *= gauss_scale + self.register_buffer("gauss_B", B) + self.d_pos = d_pos + + def get_sine_embeddings(self, xyz, num_channels, input_range): + num_channels = self.d_pos + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + ndim = num_channels // xyz.shape[2] + if ndim % 2 != 0: + ndim -= 1 + # automatically handle remainder by assiging it to the first dim + rems = num_channels - (ndim * xyz.shape[2]) + + assert ( + ndim % 2 == 0 + ), f"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}" + + final_embeds = [] + prev_dim = 0 + + for d in range(xyz.shape[2]): + cdim = ndim + if rems > 0: + # add remainder in increments of two to maintain even size + cdim += 2 + rems -= 2 + + if cdim != prev_dim: + dim_t = torch.arange( + cdim, dtype=torch.float32, device=xyz.device + ) + dim_t = self.temperature ** (2 * (dim_t // 2) / cdim) + + # create batch x cdim x nccords embedding + raw_pos = xyz[:, :, d] + if self.scale: + raw_pos *= self.scale + pos = raw_pos[:, :, None] / dim_t + pos = torch.stack( + (pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3 + ).flatten(2) + final_embeds.append(pos) + prev_dim = cdim + + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None): + # Follows - 
https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html + + if num_channels is None: + num_channels = self.gauss_B.shape[1] * 2 + + bsize, npoints = xyz.shape[0], xyz.shape[1] + assert num_channels > 0 and num_channels % 2 == 0 + d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1] + d_out = num_channels // 2 + assert d_out <= max_d_out + assert d_in == xyz.shape[-1] + + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + xyz *= 2 * np.pi + xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view( + bsize, npoints, d_out + ) + final_embeds = [xyz_proj.sin(), xyz_proj.cos()] + + # return batch x d_pos x npoints embedding + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def forward(self, xyz, num_channels=None, input_range=None): + assert isinstance(xyz, torch.Tensor) + assert xyz.ndim == 3 + # xyz is batch x npoints x 3 + if self.pos_type == "sine": + with torch.no_grad(): + out = self.get_sine_embeddings(xyz, num_channels, input_range) + elif self.pos_type == "fourier": + with torch.no_grad(): + out = self.get_fourier_embeddings( + xyz, num_channels, input_range + ) + else: + raise ValueError(f"Unknown {self.pos_type}") + + return out + + def extra_repr(self): + st = f"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}" + if hasattr(self, "gauss_B"): + st += f", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}" + return st diff --git a/models/Mask3D/build/lib/mask3d/models/res16unet.py b/models/Mask3D/build/lib/mask3d/models/res16unet.py new file mode 100644 index 0000000000000000000000000000000000000000..db771a6f12341b70d9e27e8f61efc2878b5d12c3 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/res16unet.py @@ -0,0 +1,444 @@ +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.resnet import ResNetBase, get_norm +from mask3d.models.modules.common import ConvType, NormType, conv, conv_tr +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class Res16UNetBase(ResNetBase): + BLOCK = None + PLANES = (32, 64, 128, 256, 256, 256, 256, 256) + DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) + INIT_DIM = 32 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. 
+ # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__( + self, in_channels, out_channels, config, D=3, out_fpn=False, **kwargs + ): + super().__init__(in_channels, out_channels, config, D) + self.out_fpn = out_fpn + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv0p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn0 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + + self.conv1p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn1 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p8s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr4p16s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr5p8s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, 
bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr6p4s2 = conv_tr( + self.inplanes, + self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion + self.block7 = self._make_layer( + self.BLOCK, + self.PLANES[6], + self.LAYERS[6], + dilation=dilations[6], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr7p2s2 = conv_tr( + self.inplanes, + self.PLANES[7], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr7 = get_norm( + self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[7] + self.INIT_DIM + self.block8 = self._make_layer( + self.BLOCK, + self.PLANES[7], + self.LAYERS[7], + dilation=dilations[7], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.final = conv( + self.PLANES[7], + out_channels, + kernel_size=1, + stride=1, + bias=True, + D=D, + ) + self.relu = MinkowskiReLU(inplace=True) + + def forward(self, x): + feature_maps = [] + + out = self.conv0p1s1(x) + out = self.bn0(out) + out_p1 = self.relu(out) + + out = self.conv1p1s2(out_p1) + out = self.bn1(out) + out = self.relu(out) + out_b1p2 = self.block1(out) + + out = self.conv2p2s2(out_b1p2) + out = self.bn2(out) + out = self.relu(out) + out_b2p4 = self.block2(out) + + out = self.conv3p4s2(out_b2p4) + out = self.bn3(out) + out = self.relu(out) + out_b3p8 = self.block3(out) + + # pixel_dist=16 + out = self.conv4p8s2(out_b3p8) + out = self.bn4(out) + out = self.relu(out) + out = self.block4(out) + + feature_maps.append(out) + + # pixel_dist=8 + out = self.convtr4p16s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p8) + out = self.block5(out) + + feature_maps.append(out) + + # pixel_dist=4 + out = self.convtr5p8s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p4) + out = self.block6(out) + + feature_maps.append(out) + + # pixel_dist=2 + out = self.convtr6p4s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p2) + out = self.block7(out) + + feature_maps.append(out) + + # pixel_dist=1 + out = self.convtr7p2s2(out) + out = self.bntr7(out) + out = self.relu(out) + + out = me.cat(out, out_p1) + out = self.block8(out) + + feature_maps.append(out) + + if not self.out_fpn: + return out + else: + return out, feature_maps + + +class Res16UNet14(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1, 1, 1, 1, 1) + + +class Res16UNet18(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) + + +class Res16UNet34(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) + + +class Res16UNet50(Res16UNetBase): + BLOCK = Bottleneck + LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) + + +class Res16UNet101(Res16UNetBase): + BLOCK = Bottleneck + LAYERS = (2, 3, 4, 23, 2, 2, 2, 2) + + +class Res16UNet14A(Res16UNet14): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class Res16UNet14A2(Res16UNet14A): + LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) + + 
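+# The variants below only override PLANES (channel widths of the four encoder stages,
+# indices 0-3, and of the four decoder stages, indices 4-7) and LAYERS (number of
+# residual blocks per stage); deeper or wider presets trade inference speed for accuracy.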
+class Res16UNet14B(Res16UNet14): + PLANES = (32, 64, 128, 256, 128, 128, 128, 128) + + +class Res16UNet14B2(Res16UNet14B): + LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) + + +class Res16UNet14B3(Res16UNet14B): + LAYERS = (2, 2, 2, 2, 1, 1, 1, 1) + + +class Res16UNet14C(Res16UNet14): + PLANES = (32, 64, 128, 256, 192, 192, 128, 128) + + +class Res16UNet14D(Res16UNet14): + PLANES = (32, 64, 128, 256, 384, 384, 384, 384) + + +class Res16UNet18A(Res16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class Res16UNet18B(Res16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 128, 128) + + +class Res16UNet18D(Res16UNet18): + PLANES = (32, 64, 128, 256, 384, 384, 384, 384) + + +class Res16UNet34A(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 64, 64) + + +class Res16UNet34B(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 64, 32) + + +class Res16UNet34C(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 96, 96) + + +class Custom30M(Res16UNet34): + PLANES = (32, 64, 128, 256, 128, 64, 64, 32) + + +class Res16UNet34D(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 96, 128) + + +class STRes16UNetBase(Res16UNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STRes16UNet14(STRes16UNetBase, Res16UNet14): + pass + + +class STRes16UNet14A(STRes16UNetBase, Res16UNet14A): + pass + + +class STRes16UNet18(STRes16UNetBase, Res16UNet18): + pass + + +class STRes16UNet34(STRes16UNetBase, Res16UNet34): + pass + + +class STRes16UNet50(STRes16UNetBase, Res16UNet50): + pass + + +class STRes16UNet101(STRes16UNetBase, Res16UNet101): + pass + + +class STRes16UNet18A(STRes16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class STResTesseract16UNetBase(STRes16UNetBase): + pass + # CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase): + pass diff --git a/models/Mask3D/build/lib/mask3d/models/resnet.py b/models/Mask3D/build/lib/mask3d/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ad622893d191fce0cf9db6edafbc83f684d218 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/resnet.py @@ -0,0 +1,243 @@ +import torch.nn as nn +import MinkowskiEngine as ME + +from mask3d.models.model import Model +from mask3d.models.modules.common import ConvType, NormType, conv, get_norm, sum_pool +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class ResNetBase(Model): + BLOCK = None + LAYERS = () + INIT_DIM = 64 + PLANES = (64, 128, 256, 512) + OUT_PIXEL_DIST = 32 + HAS_LAST_BLOCK = False + CONV_TYPE = ConvType.HYPERCUBE + + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + assert self.BLOCK is not None + assert self.OUT_PIXEL_DIST > 0 + + super().__init__(in_channels, out_channels, config, D, **kwargs) + + self.network_initialization(in_channels, out_channels, config, D) + self.weight_initialization() + + def network_initialization(self, in_channels, out_channels, config, D): + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + dilations = config.dilations + bn_momentum = config.bn_momentum + self.inplanes = self.INIT_DIM + self.conv1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + D=D, + ) + + self.bn1 = get_norm( + 
NormType.BATCH_NORM, + self.inplanes, + D=self.D, + bn_momentum=bn_momentum, + ) + self.relu = ME.MinkowskiReLU(inplace=True) + self.pool = sum_pool( + kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D + ) + + self.layer1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[0], 1), + ) + self.layer2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[1], 1), + ) + self.layer3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[2], 1), + ) + self.layer4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[3], 1), + ) + + self.final = conv( + self.PLANES[3] * self.BLOCK.expansion, + out_channels, + kernel_size=1, + bias=True, + D=D, + ) + + def weight_initialization(self): + for m in self.modules(): + if isinstance(m, ME.MinkowskiBatchNorm): + nn.init.constant_(m.bn.weight, 1) + nn.init.constant_(m.bn.bias, 0) + + def _make_layer( + self, + block, + planes, + blocks, + stride=1, + dilation=1, + norm_type=NormType.BATCH_NORM, + bn_momentum=0.1, + ): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + D=self.D, + ), + get_norm( + norm_type, + planes * block.expansion, + D=self.D, + bn_momentum=bn_momentum, + ), + ) + layers = [] + layers.append( + block( + self.inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + self.inplanes, + planes, + stride=1, + dilation=dilation, + conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.pool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.final(x) + return x + + +class ResNet14(ResNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1) + + +class ResNet18(ResNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2) + + +class ResNet34(ResNetBase): + BLOCK = BasicBlock + LAYERS = (3, 4, 6, 3) + + +class ResNet50(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 6, 3) + + +class ResNet101(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 23, 3) + + +class STResNetBase(ResNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STResNet14(STResNetBase, ResNet14): + pass + + +class STResNet18(STResNetBase, ResNet18): + pass + + +class STResNet34(STResNetBase, ResNet34): + pass + + +class STResNet50(STResNetBase, ResNet50): + pass + + +class STResNet101(STResNetBase, ResNet101): + pass + + +class STResTesseractNetBase(STResNetBase): + CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseractNet14(STResTesseractNetBase, STResNet14): + pass + + +class STResTesseractNet18(STResTesseractNetBase, STResNet18): + pass + + +class STResTesseractNet34(STResTesseractNetBase, STResNet34): + pass + + +class STResTesseractNet50(STResTesseractNetBase, 
STResNet50): + pass + + +class STResTesseractNet101(STResTesseractNetBase, STResNet101): + pass diff --git a/models/Mask3D/build/lib/mask3d/models/resunet.py b/models/Mask3D/build/lib/mask3d/models/resunet.py new file mode 100644 index 0000000000000000000000000000000000000000..98a3adc56f09d534256960c080594e5df3a41c7c --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/resunet.py @@ -0,0 +1,617 @@ +import torch.nn as nn +import MinkowskiEngine as ME +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.resnet import ResNetBase, get_norm +from mask3d.models.modules.common import ConvType, NormType, conv, conv_tr +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockINBN + + +class MinkUNetBase(ResNetBase): + BLOCK = None + PLANES = (64, 128, 256, 512, 256, 128, 128) + DILATIONS = (1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2) + INIT_DIM = 64 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. + # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + super().__init__(in_channels, out_channels, config, D) + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv1p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn1 = get_norm( + self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + 
bn_momentum=bn_momentum, + ) + self.convtr4p8s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr5p4s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr6p2s2 = conv_tr( + self.inplanes, + self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + self.relu = MinkowskiReLU(inplace=True) + + self.final = nn.Sequential( + conv( + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, + 512, + kernel_size=1, + stride=1, + dilation=1, + bias=False, + D=D, + ), + ME.MinkowskiBatchNorm(512), + ME.MinkowskiReLU(), + conv( + 512, + out_channels, + kernel_size=1, + stride=1, + dilation=1, + bias=True, + D=D, + ), + ) + + def forward(self, x): + out = self.conv1p1s1(x) + out = self.bn1(out) + out = self.relu(out) + + out_b1p1 = self.block1(out) + + out = self.conv2p1s2(out_b1p1) + out = self.bn2(out) + out = self.relu(out) + + out_b2p2 = self.block2(out) + + out = self.conv3p2s2(out_b2p2) + out = self.bn3(out) + out = self.relu(out) + + out_b3p4 = self.block3(out) + + out = self.conv4p4s2(out_b3p4) + out = self.bn4(out) + out = self.relu(out) + + # pixel_dist=8 + out = self.block4(out) + + out = self.convtr4p8s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p4) + out = self.block5(out) + + out = self.convtr5p4s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p2) + out = self.block6(out) + + out = self.convtr6p2s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p1) + return self.final(out) + + +class ResUNet14(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1, 1, 1) + + +class ResUNet18(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2, 2, 2) + + +class ResUNet18INBN(ResUNet18): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + BLOCK = BasicBlockINBN + + +class ResUNet34(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (3, 4, 6, 3, 2, 2) + + +class ResUNet50(MinkUNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 6, 3, 2, 2) + + +class ResUNet101(MinkUNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 23, 3, 2, 2) + + +class ResUNet14D(ResUNet14): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet18D(ResUNet18): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet34D(ResUNet34): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet34E(ResUNet34): + INIT_DIM = 32 + PLANES = (32, 64, 128, 
256, 128, 64, 64) + + +class ResUNet34F(ResUNet34): + INIT_DIM = 32 + PLANES = (32, 64, 128, 256, 128, 64, 32) + + +class MinkUNetHyper(MinkUNetBase): + BLOCK = None + PLANES = (64, 128, 256, 512, 256, 128, 128) + DILATIONS = (1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2) + INIT_DIM = 64 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. + # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + super(MinkUNetBase, self).__init__( + in_channels, out_channels, config, D + ) + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv1p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn1 = get_norm( + self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr4 = ME.MinkowskiPoolingTranspose( + kernel_size=8, stride=8, dimension=D + ) + _ = self.inplanes + self.convtr4p8s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + 
dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr5 = ME.MinkowskiPoolingTranspose( + kernel_size=4, stride=4, dimension=D + ) + out_pool5 = self.inplanes + self.convtr5p4s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr6 = ME.MinkowskiPoolingTranspose( + kernel_size=2, stride=2, dimension=D + ) + out_pool6 = self.inplanes + self.convtr6p2s2 = conv_tr( + self.inplanes, + self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + + self.relu = MinkowskiReLU(inplace=True) + + self.final = nn.Sequential( + conv( + out_pool5 + + out_pool6 + + self.PLANES[6] + + self.PLANES[0] * self.BLOCK.expansion, + 512, + kernel_size=1, + bias=False, + D=D, + ), + ME.MinkowskiBatchNorm(512), + ME.MinkowskiReLU(), + conv(512, out_channels, kernel_size=1, bias=True, D=D), + ) + + def forward(self, x): + out = self.conv1p1s1(x) + out = self.bn1(out) + out = self.relu(out) + + out_b1p1 = self.block1(out) + + out = self.conv2p1s2(out_b1p1) + out = self.bn2(out) + out = self.relu(out) + + out_b2p2 = self.block2(out) + + out = self.conv3p2s2(out_b2p2) + out = self.bn3(out) + out = self.relu(out) + + out_b3p4 = self.block3(out) + + out = self.conv4p4s2(out_b3p4) + out = self.bn4(out) + out = self.relu(out) + + # pixel_dist=8 + out = self.block4(out) + + out = self.convtr4p8s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p4) + out = self.block5(out) + out_5 = self.pool_tr5(out) + + out = self.convtr5p4s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p2) + out = self.block6(out) + out_6 = self.pool_tr6(out) + + out = self.convtr6p2s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p1, out_6, out_5) + return self.final(out) + + +class MinkUNetHyper14INBN(MinkUNetHyper): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + BLOCK = BasicBlockINBN + + +class STMinkUNetBase(MinkUNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STResUNet14(STMinkUNetBase, ResUNet14): + pass + + +class STResUNet18(STMinkUNetBase, ResUNet18): + pass + + +class STResUNet34(STMinkUNetBase, ResUNet34): + pass + + +class STResUNet50(STMinkUNetBase, ResUNet50): + pass + + +class STResUNet101(STMinkUNetBase, ResUNet101): + pass + + +class STResTesseractUNetBase(STMinkUNetBase): + CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14): + pass + + +class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18): + pass + + +class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34): + pass + + +class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50): + pass + + +class 
STResTesseractUNet101(STResTesseractUNetBase, ResUNet101): + pass diff --git a/models/Mask3D/build/lib/mask3d/models/wrapper.py b/models/Mask3D/build/lib/mask3d/models/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..a6bf1678d2106049b8e6a2ac2f3a9aff37dcfc9c --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/models/wrapper.py @@ -0,0 +1,32 @@ +import random + +from torch.nn import Module +from MinkowskiEngine import SparseTensor + + +class Wrapper(Module): + """ + Wrapper for the segmentation networks. + """ + + OUT_PIXEL_DIST = -1 + + def __init__(self, NetClass, in_nchannel, out_nchannel, config): + super().__init__() + self.initialize_filter(NetClass, in_nchannel, out_nchannel, config) + + def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config): + raise NotImplementedError("Must initialize a model and a filter") + + def forward(self, x, coords, colors=None): + soutput = self.model(x) + + # During training, make the network invariant to the filter + if not self.training or random.random() < 0.5: + # Filter requires the model to finish the forward pass + wrapper_coords = self.filter.initialize_coords( + self.model, coords, colors + ) + finput = SparseTensor(soutput.F, wrapper_coords) + soutput = self.filter(finput) + return soutput diff --git a/models/Mask3D/build/lib/mask3d/predict.py b/models/Mask3D/build/lib/mask3d/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..4c085fd01897c13540da8eac9f941dcf0847ca6f --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/predict.py @@ -0,0 +1,187 @@ +import hydra +from omegaconf import DictConfig, OmegaConf +from models.mask3d import Mask3D +import os +import torch + +import MinkowskiEngine as ME +import open3d as o3d +import numpy as np +import albumentations as A + +from utils.utils import ( + flatten_dict, + load_baseline_model, + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) + +from datasets.scannet200.scannet200_constants import ( + SCANNET_COLOR_MAP_200, + SCANNET_COLOR_MAP_20, + VALID_CLASS_IDS_200, + VALID_CLASS_IDS_20, + CLASS_LABELS_200, + CLASS_LABELS_20, +) + +root_dir = '/home/weders/scratch/scratch/scannetter/arkit/raw/Validation' + +class InstanceSegmentation(torch.nn.Module): + def __init__(self, cfg): + super().__init__() + self.model = hydra.utils.instantiate(cfg.model) + + + def forward(self, x, raw_coordinates=None): + return self.model(x, raw_coordinates=raw_coordinates) + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def main(cfg: DictConfig): + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.chdir(hydra.utils.get_original_cwd()) + model = InstanceSegmentation(cfg) + + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model) + + model = model.to(device) + # model.eval() + + color_mean = (0.47793125906962, 0.4303257521323044, 0.3749598901421883) + color_std = (0.2834475483823543, 0.27566157565723015, 0.27018971370874995) + normalize_color = A.Normalize(mean=color_mean, std=color_std) + + # iterate over data + for sc in os.listdir(root_dir): + + + if not os.path.exists(os.path.join(root_dir, sc, 'mesh_tsdf.ply')): + continue + + # save outputs + output_dir = os.path.join(root_dir, sc, 'pred_mask3d_ours') + if not 
os.path.exists(output_dir): + os.makedirs(output_dir) + + if sc != '42445991': + continue + + # if os.path.exists(os.path.join(output_dir, 'mask3d_predictions.txt')): + # print('Skipping', sc) + # continue + + print('Processing', sc) + + mesh = o3d.io.read_triangle_mesh(os.path.join(root_dir, sc, 'mesh_tsdf.ply')) + mesh.compute_vertex_normals() + + points = np.asarray(mesh.vertices) + colors = np.asarray(mesh.vertex_colors) + + + colors = colors * 255. + pseudo_image = colors.astype(np.uint8)[np.newaxis, :, :] + colors = np.squeeze(normalize_color(image=pseudo_image)["image"]) + + # voxelize data + coords = np.floor(points / 0.02) + + # maybe this change (_, _, ...) is not necessary and we can directly get out + # the sample coordinates? + _, _, unique_map, inverse_map = ME.utils.sparse_quantize(coordinates=coords, features=colors, return_index=True, return_inverse=True) + + sample_coordinates = coords[unique_map] + coordinates = [torch.from_numpy(sample_coordinates).int()] + sample_features = colors[unique_map] + features = [torch.from_numpy(sample_features).float()] + + coordinates, _ = ME.utils.sparse_collate(coords=coordinates, feats=features) + features = torch.cat(features, dim=0) + data = ME.SparseTensor( + coordinates=coordinates, + features=features, + device=device, + ) + + # run model + with torch.no_grad(): + outputs = model(data, raw_coordinates=features) + + del data + torch.cuda.empty_cache() + + # parse predictions + logits = outputs["pred_logits"] + masks = outputs["pred_masks"] + + + # reformat predictions + logits = logits[0].detach().cpu() + masks = masks[0].detach().cpu() + + labels = [] + confidences = [] + masks_binary = [] + + for i in range(len(logits)): + p_labels = torch.softmax(logits[i], dim=-1) + p_masks = torch.sigmoid(masks[:, i]) + l = torch.argmax(p_labels, dim=-1) + c_label = torch.max(p_labels) + m = p_masks > 0.5 + c_m = p_masks[m].sum() / (m.sum() + 1e-8) + c = c_label * c_m + if l < 200 and c > 0.5: + labels.append(l.item()) + confidences.append(c.item()) + masks_binary.append(m[inverse_map]) # mapping the mask back to the original point cloud + + + # save labelled mesh + mesh_labelled = o3d.geometry.TriangleMesh() + mesh_labelled.vertices = mesh.vertices + mesh_labelled.triangles = mesh.triangles + + labels_mapped = np.zeros((len(mesh.vertices), 1)) + colors_mapped = np.zeros((len(mesh.vertices), 3)) + + confidences, labels, masks_binary = zip(*sorted(zip(confidences, labels, masks_binary), reverse=False)) + for i, (l, c, m) in enumerate(zip(labels, confidences, masks_binary)): + labels_mapped[m == 1] = l + if l == 0: + l_ = -1 + 2 # label offset is 2 for scannet 200, 0 needs to be mapped to -1 before (see trainer.py in Mask3D) + else: + l_ = l + 2 + # print(VALID_CLASS_IDS_200[l_], SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l_]], l_, CLASS_LABELS_200[l_]) + colors_mapped[m == 1] = SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l_]] + + # colors_mapped[mask_mapped == 1] = SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l]] + + + + + mesh_labelled.vertex_colors = o3d.utility.Vector3dVector(colors_mapped.astype(np.float32) / 255.) 
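+        # The mesh written below is for visualization only: every vertex covered by a kept
+        # instance mask gets the SCANNET_COLOR_MAP_200 colour of its predicted class, while
+        # vertices outside all masks stay black (colors_mapped is zero-initialised).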
+ o3d.io.write_triangle_mesh(f'{output_dir}/mesh_tsdf_labelled.ply', mesh_labelled) + + mask_path = os.path.join(output_dir, 'pred_mask') + if not os.path.exists(mask_path): + os.makedirs(mask_path) + + # sorting by confidence + with open(os.path.join(output_dir, 'mask3d_predictions.txt'), 'w') as f: + for i, (l, c, m) in enumerate(zip(labels, confidences, masks_binary)): + mask_file = f'pred_mask/{str(i).zfill(3)}.txt' + f.write(f'{mask_file} {VALID_CLASS_IDS_200[l]} {c}\n') + np.savetxt(os.path.join(output_dir, mask_file), m.numpy(), fmt='%d') + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/Mask3D/build/lib/mask3d/preprocess_arkitscenes.py b/models/Mask3D/build/lib/mask3d/preprocess_arkitscenes.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/trainer/__init__.py b/models/Mask3D/build/lib/mask3d/trainer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/trainer/trainer.py b/models/Mask3D/build/lib/mask3d/trainer/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..b794e38aa5b2cef7eb106f95ced43466768b3dba --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/trainer/trainer.py @@ -0,0 +1,1302 @@ +import gc +from contextlib import nullcontext +from pathlib import Path +import statistics +import shutil +import os +import math +import pyviz3d.visualizer as vis +from torch_scatter import scatter_mean +import matplotlib +from benchmark.evaluate_semantic_instance import evaluate +from collections import defaultdict +from sklearn.cluster import DBSCAN +from utils.votenet_utils.eval_det import eval_det +from datasets.scannet200.scannet200_splits import ( + HEAD_CATS_SCANNET_200, + TAIL_CATS_SCANNET_200, + COMMON_CATS_SCANNET_200, + VALID_CLASS_IDS_200_VALIDATION, +) + +import hydra +import MinkowskiEngine as ME +import numpy as np +import pytorch_lightning as pl +import torch +from models.metrics import IoU +import random +import colorsys +from typing import List, Tuple +import functools + + +@functools.lru_cache(20) +def get_evenly_distributed_colors( + count: int, +) -> List[Tuple[np.uint8, np.uint8, np.uint8]]: + # lru cache caches color tuples + HSV_tuples = [(x / count, 1.0, 1.0) for x in range(count)] + random.shuffle(HSV_tuples) + return list( + map( + lambda x: (np.array(colorsys.hsv_to_rgb(*x)) * 255).astype( + np.uint8 + ), + HSV_tuples, + ) + ) + + +class RegularCheckpointing(pl.Callback): + def on_train_epoch_end( + self, trainer: "pl.Trainer", pl_module: "pl.LightningModule" + ): + general = pl_module.config.general + trainer.save_checkpoint(f"{general.save_dir}/last-epoch.ckpt") + print("Checkpoint created") + + +class InstanceSegmentation(pl.LightningModule): + def __init__(self, config): + super().__init__() + + self.decoder_id = config.general.decoder_id + + if config.model.train_on_segments: + self.mask_type = "segment_mask" + else: + self.mask_type = "masks" + + self.eval_on_segments = config.general.eval_on_segments + + self.config = config + self.save_hyperparameters() + # model + self.model = hydra.utils.instantiate(config.model) + self.optional_freeze = nullcontext + if config.general.freeze_backbone: + self.optional_freeze = torch.no_grad + # loss + self.ignore_label = config.data.ignore_label + + matcher = hydra.utils.instantiate(config.matcher) + weight_dict = { + "loss_ce": 
matcher.cost_class, + "loss_mask": matcher.cost_mask, + "loss_dice": matcher.cost_dice, + } + + aux_weight_dict = {} + for i in range(self.model.num_levels * self.model.num_decoders): + if i not in self.config.general.ignore_mask_idx: + aux_weight_dict.update( + {k + f"_{i}": v for k, v in weight_dict.items()} + ) + else: + aux_weight_dict.update( + {k + f"_{i}": 0.0 for k, v in weight_dict.items()} + ) + weight_dict.update(aux_weight_dict) + + self.preds = dict() + self.bbox_preds = dict() + self.bbox_gt = dict() + + self.criterion = hydra.utils.instantiate( + config.loss, matcher=matcher, weight_dict=weight_dict + ) + + # metrics + self.confusion = hydra.utils.instantiate(config.metrics) + self.iou = IoU() + # misc + self.labels_info = dict() + + def forward( + self, x, point2segment=None, raw_coordinates=None, is_eval=False + ): + with self.optional_freeze(): + x = self.model( + x, + point2segment, + raw_coordinates=raw_coordinates, + is_eval=is_eval, + ) + return x + + def training_step(self, batch, batch_idx): + data, target, file_names = batch + + if data.features.shape[0] > self.config.general.max_batch_size: + print("data exceeds threshold") + raise RuntimeError("BATCH TOO BIG") + + if len(target) == 0: + print("no targets") + return None + + raw_coordinates = None + if self.config.data.add_raw_coordinates: + raw_coordinates = data.features[:, -3:] + data.features = data.features[:, :-3] + + data = ME.SparseTensor( + coordinates=data.coordinates, + features=data.features, + device=self.device, + ) + + try: + output = self.forward( + data, + point2segment=[ + target[i]["point2segment"] for i in range(len(target)) + ], + raw_coordinates=raw_coordinates, + ) + except RuntimeError as run_err: + print(run_err) + if ( + "only a single point gives nans in cross-attention" + == run_err.args[0] + ): + return None + else: + raise run_err + + try: + losses = self.criterion(output, target, mask_type=self.mask_type) + except ValueError as val_err: + print(f"ValueError: {val_err}") + print(f"data shape: {data.shape}") + print(f"data feat shape: {data.features.shape}") + print(f"data feat nans: {data.features.isnan().sum()}") + print(f"output: {output}") + print(f"target: {target}") + print(f"filenames: {file_names}") + raise val_err + + for k in list(losses.keys()): + if k in self.criterion.weight_dict: + losses[k] *= self.criterion.weight_dict[k] + else: + # remove this loss if not specified in `weight_dict` + losses.pop(k) + + logs = { + f"train_{k}": v.detach().cpu().item() for k, v in losses.items() + } + + logs["train_mean_loss_ce"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_ce" in k]] + ) + + logs["train_mean_loss_mask"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_mask" in k]] + ) + + logs["train_mean_loss_dice"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_dice" in k]] + ) + + self.log_dict(logs) + return sum(losses.values()) + + def validation_step(self, batch, batch_idx): + return self.eval_step(batch, batch_idx) + + def export(self, pred_masks, scores, pred_classes, file_names, decoder_id): + root_path = f"eval_output" + base_path = f"{root_path}/instance_evaluation_{self.config.general.experiment_name}_{self.current_epoch}/decoder_{decoder_id}" + pred_mask_path = f"{base_path}/pred_mask" + + Path(pred_mask_path).mkdir(parents=True, exist_ok=True) + + file_name = file_names + with open(f"{base_path}/{file_name}.txt", "w") as fout: + real_id = -1 + for instance_id in range(len(pred_classes)): 
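+                # One predicted instance per iteration: if its score clears the export threshold,
+                # the binary mask is saved to pred_mask/{file_name}_{real_id}.txt and a line of the
+                # form "<mask path> <class id> <score>" is appended to the per-scene summary file.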
+ real_id += 1 + pred_class = pred_classes[instance_id] + score = scores[instance_id] + mask = pred_masks[:, instance_id].astype("uint8") + + if score > self.config.general.export_threshold: + # reduce the export size a bit. I guess no performance difference + np.savetxt( + f"{pred_mask_path}/{file_name}_{real_id}.txt", + mask, + fmt="%d", + ) + fout.write( + f"pred_mask/{file_name}_{real_id}.txt {pred_class} {score}\n" + ) + + def training_epoch_end(self, outputs): + train_loss = sum([out["loss"].cpu().item() for out in outputs]) / len( + outputs + ) + results = {"train_loss_mean": train_loss} + self.log_dict(results) + + def validation_epoch_end(self, outputs): + self.test_epoch_end(outputs) + + def save_visualizations( + self, + target_full, + full_res_coords, + sorted_masks, + sort_classes, + file_name, + original_colors, + original_normals, + sort_scores_values, + point_size=20, + sorted_heatmaps=None, + query_pos=None, + backbone_features=None, + ): + + full_res_coords -= full_res_coords.mean(axis=0) + + gt_pcd_pos = [] + gt_pcd_normals = [] + gt_pcd_color = [] + gt_inst_pcd_color = [] + gt_boxes = [] + + if "labels" in target_full: + instances_colors = torch.from_numpy( + np.vstack( + get_evenly_distributed_colors( + target_full["labels"].shape[0] + ) + ) + ) + for instance_counter, (label, mask) in enumerate( + zip(target_full["labels"], target_full["masks"]) + ): + if label == 255: + continue + + mask_tmp = mask.detach().cpu().numpy() + mask_coords = full_res_coords[mask_tmp.astype(bool), :] + + if len(mask_coords) == 0: + continue + + gt_pcd_pos.append(mask_coords) + mask_coords_min = full_res_coords[ + mask_tmp.astype(bool), : + ].min(axis=0) + mask_coords_max = full_res_coords[ + mask_tmp.astype(bool), : + ].max(axis=0) + size = mask_coords_max - mask_coords_min + mask_coords_middle = mask_coords_min + size / 2 + + gt_boxes.append( + { + "position": mask_coords_middle, + "size": size, + "color": self.validation_dataset.map2color([label])[0], + } + ) + + gt_pcd_color.append( + self.validation_dataset.map2color([label]).repeat( + gt_pcd_pos[-1].shape[0], 1 + ) + ) + gt_inst_pcd_color.append( + instances_colors[instance_counter % len(instances_colors)] + .unsqueeze(0) + .repeat(gt_pcd_pos[-1].shape[0], 1) + ) + + gt_pcd_normals.append( + original_normals[mask_tmp.astype(bool), :] + ) + + gt_pcd_pos = np.concatenate(gt_pcd_pos) + gt_pcd_normals = np.concatenate(gt_pcd_normals) + gt_pcd_color = np.concatenate(gt_pcd_color) + gt_inst_pcd_color = np.concatenate(gt_inst_pcd_color) + + v = vis.Visualizer() + + v.add_points( + "RGB Input", + full_res_coords, + colors=original_colors, + normals=original_normals, + visible=True, + point_size=point_size, + ) + + if backbone_features is not None: + v.add_points( + "PCA", + full_res_coords, + colors=backbone_features, + normals=original_normals, + visible=False, + point_size=point_size, + ) + + if "labels" in target_full: + v.add_points( + "Semantics (GT)", + gt_pcd_pos, + colors=gt_pcd_color, + normals=gt_pcd_normals, + alpha=0.8, + visible=False, + point_size=point_size, + ) + v.add_points( + "Instances (GT)", + gt_pcd_pos, + colors=gt_inst_pcd_color, + normals=gt_pcd_normals, + alpha=0.8, + visible=False, + point_size=point_size, + ) + + pred_coords = [] + pred_normals = [] + pred_sem_color = [] + pred_inst_color = [] + + for did in range(len(sorted_masks)): + instances_colors = torch.from_numpy( + np.vstack( + get_evenly_distributed_colors( + max(1, sorted_masks[did].shape[1]) + ) + ) + ) + + for i in 
reversed(range(sorted_masks[did].shape[1])): + coords = full_res_coords[ + sorted_masks[did][:, i].astype(bool), : + ] + + mask_coords = full_res_coords[ + sorted_masks[did][:, i].astype(bool), : + ] + mask_normals = original_normals[ + sorted_masks[did][:, i].astype(bool), : + ] + + label = sort_classes[did][i] + + if len(mask_coords) == 0: + continue + + pred_coords.append(mask_coords) + pred_normals.append(mask_normals) + + pred_sem_color.append( + self.validation_dataset.map2color([label]).repeat( + mask_coords.shape[0], 1 + ) + ) + + pred_inst_color.append( + instances_colors[i % len(instances_colors)] + .unsqueeze(0) + .repeat(mask_coords.shape[0], 1) + ) + + if len(pred_coords) > 0: + pred_coords = np.concatenate(pred_coords) + pred_normals = np.concatenate(pred_normals) + pred_sem_color = np.concatenate(pred_sem_color) + pred_inst_color = np.concatenate(pred_inst_color) + + v.add_points( + "Semantics (Mask3D)", + pred_coords, + colors=pred_sem_color, + normals=pred_normals, + visible=False, + alpha=0.8, + point_size=point_size, + ) + v.add_points( + "Instances (Mask3D)", + pred_coords, + colors=pred_inst_color, + normals=pred_normals, + visible=False, + alpha=0.8, + point_size=point_size, + ) + + v.save( + f"{self.config['general']['save_dir']}/visualizations/{file_name}" + ) + + def eval_step(self, batch, batch_idx): + data, target, file_names = batch + inverse_maps = data.inverse_maps + target_full = data.target_full + original_colors = data.original_colors + data_idx = data.idx + original_normals = data.original_normals + original_coordinates = data.original_coordinates + + # if len(target) == 0 or len(target_full) == 0: + # print("no targets") + # return None + + if len(data.coordinates) == 0: + return 0.0 + + raw_coordinates = None + if self.config.data.add_raw_coordinates: + raw_coordinates = data.features[:, -3:] + data.features = data.features[:, :-3] + + if raw_coordinates.shape[0] == 0: + return 0.0 + + data = ME.SparseTensor( + coordinates=data.coordinates, + features=data.features, + device=self.device, + ) + + try: + output = self.forward( + data, + point2segment=[ + target[i]["point2segment"] for i in range(len(target)) + ], + raw_coordinates=raw_coordinates, + is_eval=True, + ) + except RuntimeError as run_err: + print(run_err) + if ( + "only a single point gives nans in cross-attention" + == run_err.args[0] + ): + return None + else: + raise run_err + + if self.config.data.test_mode != "test": + if self.config.trainer.deterministic: + torch.use_deterministic_algorithms(False) + + try: + losses = self.criterion( + output, target, mask_type=self.mask_type + ) + except ValueError as val_err: + print(f"ValueError: {val_err}") + print(f"data shape: {data.shape}") + print(f"data feat shape: {data.features.shape}") + print(f"data feat nans: {data.features.isnan().sum()}") + print(f"output: {output}") + print(f"target: {target}") + print(f"filenames: {file_names}") + raise val_err + + for k in list(losses.keys()): + if k in self.criterion.weight_dict: + losses[k] *= self.criterion.weight_dict[k] + else: + # remove this loss if not specified in `weight_dict` + losses.pop(k) + if self.config.trainer.deterministic: + torch.use_deterministic_algorithms(True) + + if self.config.general.save_visualizations: + backbone_features = ( + output["backbone_features"].F.detach().cpu().numpy() + ) + from sklearn import decomposition + + pca = decomposition.PCA(n_components=3) + pca.fit(backbone_features) + pca_features = pca.transform(backbone_features) + rescaled_pca = ( + 255 + * 
(pca_features - pca_features.min()) + / (pca_features.max() - pca_features.min()) + ) + + self.eval_instance_step( + output, + target, + target_full, + inverse_maps, + file_names, + original_coordinates, + original_colors, + original_normals, + raw_coordinates, + data_idx, + backbone_features=rescaled_pca + if self.config.general.save_visualizations + else None, + ) + + if self.config.data.test_mode != "test": + return { + f"val_{k}": v.detach().cpu().item() for k, v in losses.items() + } + else: + return 0.0 + + def test_step(self, batch, batch_idx): + return self.eval_step(batch, batch_idx) + + def get_full_res_mask( + self, mask, inverse_map, point2segment_full, is_heatmap=False + ): + mask = mask.detach().cpu()[inverse_map] # full res + + if self.eval_on_segments and is_heatmap == False: + mask = scatter_mean( + mask, point2segment_full, dim=0 + ) # full res segments + mask = (mask > 0.5).float() + mask = mask.detach().cpu()[ + point2segment_full.cpu() + ] # full res points + + return mask + + def get_mask_and_scores( + self, mask_cls, mask_pred, num_queries=100, num_classes=18, device=None + ): + if device is None: + device = self.device + labels = ( + torch.arange(num_classes, device=device) + .unsqueeze(0) + .repeat(num_queries, 1) + .flatten(0, 1) + ) + + if self.config.general.topk_per_image != -1: + scores_per_query, topk_indices = mask_cls.flatten(0, 1).topk( + self.config.general.topk_per_image, sorted=True + ) + else: + scores_per_query, topk_indices = mask_cls.flatten(0, 1).topk( + num_queries, sorted=True + ) + + labels_per_query = labels[topk_indices] + topk_indices = topk_indices // num_classes + mask_pred = mask_pred[:, topk_indices] + + result_pred_mask = (mask_pred > 0).float() + heatmap = mask_pred.float().sigmoid() + + mask_scores_per_image = (heatmap * result_pred_mask).sum(0) / ( + result_pred_mask.sum(0) + 1e-6 + ) + score = scores_per_query * mask_scores_per_image + classes = labels_per_query + + return score, result_pred_mask, classes, heatmap + + def eval_instance_step( + self, + output, + target_low_res, + target_full_res, + inverse_maps, + file_names, + full_res_coords, + original_colors, + original_normals, + raw_coords, + idx, + first_full_res=False, + backbone_features=None, + ): + label_offset = self.validation_dataset.label_offset + prediction = output["aux_outputs"] + prediction.append( + { + "pred_logits": output["pred_logits"], + "pred_masks": output["pred_masks"], + } + ) + + prediction[self.decoder_id][ + "pred_logits" + ] = torch.functional.F.softmax( + prediction[self.decoder_id]["pred_logits"], dim=-1 + )[ + ..., :-1 + ] + + all_pred_classes = list() + all_pred_masks = list() + all_pred_scores = list() + all_heatmaps = list() + all_query_pos = list() + + offset_coords_idx = 0 + for bid in range(len(prediction[self.decoder_id]["pred_masks"])): + if not first_full_res: + if self.model.train_on_segments: + masks = ( + prediction[self.decoder_id]["pred_masks"][bid] + .detach() + .cpu()[target_low_res[bid]["point2segment"].cpu()] + ) + else: + masks = ( + prediction[self.decoder_id]["pred_masks"][bid] + .detach() + .cpu() + ) + + if self.config.general.use_dbscan: + new_preds = { + "pred_masks": list(), + "pred_logits": list(), + } + + curr_coords_idx = masks.shape[0] + curr_coords = raw_coords[ + offset_coords_idx : curr_coords_idx + offset_coords_idx + ] + offset_coords_idx += curr_coords_idx + + for curr_query in range(masks.shape[1]): + curr_masks = masks[:, curr_query] > 0 + + if curr_coords[curr_masks].shape[0] > 0: + clusters = ( + DBSCAN( + 
eps=self.config.general.dbscan_eps, + min_samples=self.config.general.dbscan_min_points, + n_jobs=-1, + ) + .fit(curr_coords[curr_masks]) + .labels_ + ) + + new_mask = torch.zeros(curr_masks.shape, dtype=int) + new_mask[curr_masks] = ( + torch.from_numpy(clusters) + 1 + ) + + for cluster_id in np.unique(clusters): + original_pred_masks = masks[:, curr_query] + if cluster_id != -1: + new_preds["pred_masks"].append( + original_pred_masks + * (new_mask == cluster_id + 1) + ) + new_preds["pred_logits"].append( + prediction[self.decoder_id][ + "pred_logits" + ][bid, curr_query] + ) + + scores, masks, classes, heatmap = self.get_mask_and_scores( + torch.stack(new_preds["pred_logits"]).cpu(), + torch.stack(new_preds["pred_masks"]).T, + len(new_preds["pred_logits"]), + self.model.num_classes - 1, + ) + else: + scores, masks, classes, heatmap = self.get_mask_and_scores( + prediction[self.decoder_id]["pred_logits"][bid] + .detach() + .cpu(), + masks, + prediction[self.decoder_id]["pred_logits"][bid].shape[ + 0 + ], + self.model.num_classes - 1, + ) + + masks = self.get_full_res_mask( + masks, + inverse_maps[bid], + target_full_res[bid]["point2segment"], + ) + + heatmap = self.get_full_res_mask( + heatmap, + inverse_maps[bid], + target_full_res[bid]["point2segment"], + is_heatmap=True, + ) + + if backbone_features is not None: + backbone_features = self.get_full_res_mask( + torch.from_numpy(backbone_features), + inverse_maps[bid], + target_full_res[bid]["point2segment"], + is_heatmap=True, + ) + backbone_features = backbone_features.numpy() + else: + assert False, "not tested" + masks = self.get_full_res_mask( + prediction[self.decoder_id]["pred_masks"][bid].cpu(), + inverse_maps[bid], + target_full_res[bid]["point2segment"], + ) + + scores, masks, classes, heatmap = self.get_mask_and_scores( + prediction[self.decoder_id]["pred_logits"][bid].cpu(), + masks, + prediction[self.decoder_id]["pred_logits"][bid].shape[0], + self.model.num_classes - 1, + device="cpu", + ) + + masks = masks.numpy() + heatmap = heatmap.numpy() + + sort_scores = scores.sort(descending=True) + sort_scores_index = sort_scores.indices.cpu().numpy() + sort_scores_values = sort_scores.values.cpu().numpy() + sort_classes = classes[sort_scores_index] + + sorted_masks = masks[:, sort_scores_index] + sorted_heatmap = heatmap[:, sort_scores_index] + + if self.config.general.filter_out_instances: + keep_instances = set() + pairwise_overlap = sorted_masks.T @ sorted_masks + normalization = pairwise_overlap.max(axis=0) + norm_overlaps = pairwise_overlap / normalization + + for instance_id in range(norm_overlaps.shape[0]): + # filter out unlikely masks and nearly empty masks + # if not(sort_scores_values[instance_id] < 0.3 or sorted_masks[:, instance_id].sum() < 500): + if not ( + sort_scores_values[instance_id] + < self.config.general.scores_threshold + ): + # check if mask != empty + if not sorted_masks[:, instance_id].sum() == 0.0: + overlap_ids = set( + np.nonzero( + norm_overlaps[instance_id, :] + > self.config.general.iou_threshold + )[0] + ) + + if len(overlap_ids) == 0: + keep_instances.add(instance_id) + else: + if instance_id == min(overlap_ids): + keep_instances.add(instance_id) + + keep_instances = sorted(list(keep_instances)) + all_pred_classes.append(sort_classes[keep_instances]) + all_pred_masks.append(sorted_masks[:, keep_instances]) + all_pred_scores.append(sort_scores_values[keep_instances]) + all_heatmaps.append(sorted_heatmap[:, keep_instances]) + else: + all_pred_classes.append(sort_classes) + 
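The `use_dbscan` branch above splits each predicted query mask into spatially connected components before re-scoring. Below is a self-contained sketch of that idea using sklearn's DBSCAN directly; the point cloud, mask, `eps` and `min_samples` values are illustrative stand-ins for `config.general.dbscan_eps` / `dbscan_min_points`.

```
import numpy as np
from sklearn.cluster import DBSCAN

coords = np.random.rand(500, 3)          # toy point coordinates
mask = np.random.rand(500) > 0.7         # toy binary instance mask

# Cluster only the masked points; label -1 marks DBSCAN noise.
labels = DBSCAN(eps=0.95, min_samples=4, n_jobs=-1).fit(coords[mask]).labels_

masked_idx = np.flatnonzero(mask)
split_masks = []
for cluster_id in np.unique(labels):
    if cluster_id == -1:
        continue                         # drop noise points
    new_mask = np.zeros_like(mask)
    new_mask[masked_idx[labels == cluster_id]] = True
    split_masks.append(new_mask)
```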
all_pred_masks.append(sorted_masks) + all_pred_scores.append(sort_scores_values) + all_heatmaps.append(sorted_heatmap) + + if self.validation_dataset.dataset_name == "scannet200": + all_pred_classes[bid][all_pred_classes[bid] == 0] = -1 + if self.config.data.test_mode != "test": + target_full_res[bid]["labels"][ + target_full_res[bid]["labels"] == 0 + ] = -1 + + for bid in range(len(prediction[self.decoder_id]["pred_masks"])): + all_pred_classes[ + bid + ] = self.validation_dataset._remap_model_output( + all_pred_classes[bid].cpu() + label_offset + ) + + if ( + self.config.data.test_mode != "test" + and len(target_full_res) != 0 + ): + target_full_res[bid][ + "labels" + ] = self.validation_dataset._remap_model_output( + target_full_res[bid]["labels"].cpu() + label_offset + ) + + # PREDICTION BOX + bbox_data = [] + for query_id in range( + all_pred_masks[bid].shape[1] + ): # self.model.num_queries + obj_coords = full_res_coords[bid][ + all_pred_masks[bid][:, query_id].astype(bool), : + ] + if obj_coords.shape[0] > 0: + obj_center = obj_coords.mean(axis=0) + obj_axis_length = obj_coords.max( + axis=0 + ) - obj_coords.min(axis=0) + + bbox = np.concatenate((obj_center, obj_axis_length)) + + bbox_data.append( + ( + all_pred_classes[bid][query_id].item(), + bbox, + all_pred_scores[bid][query_id], + ) + ) + self.bbox_preds[file_names[bid]] = bbox_data + + # GT BOX + bbox_data = [] + for obj_id in range(target_full_res[bid]["masks"].shape[0]): + if target_full_res[bid]["labels"][obj_id].item() == 255: + continue + + obj_coords = full_res_coords[bid][ + target_full_res[bid]["masks"][obj_id, :] + .cpu() + .detach() + .numpy() + .astype(bool), + :, + ] + if obj_coords.shape[0] > 0: + obj_center = obj_coords.mean(axis=0) + obj_axis_length = obj_coords.max( + axis=0 + ) - obj_coords.min(axis=0) + + bbox = np.concatenate((obj_center, obj_axis_length)) + bbox_data.append( + ( + target_full_res[bid]["labels"][obj_id].item(), + bbox, + ) + ) + + self.bbox_gt[file_names[bid]] = bbox_data + + if self.config.general.eval_inner_core == -1: + self.preds[file_names[bid]] = { + "pred_masks": all_pred_masks[bid], + "pred_scores": all_pred_scores[bid], + "pred_classes": all_pred_classes[bid], + } + else: + # prev val_dataset + self.preds[file_names[bid]] = { + "pred_masks": all_pred_masks[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + "pred_scores": all_pred_scores[bid], + "pred_classes": all_pred_classes[bid], + } + + if self.config.general.save_visualizations: + if "cond_inner" in self.test_dataset.data[idx[bid]]: + target_full_res[bid]["masks"] = target_full_res[bid][ + "masks" + ][:, self.test_dataset.data[idx[bid]]["cond_inner"]] + self.save_visualizations( + target_full_res[bid], + full_res_coords[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + [self.preds[file_names[bid]]["pred_masks"]], + [self.preds[file_names[bid]]["pred_classes"]], + file_names[bid], + original_colors[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + original_normals[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + [self.preds[file_names[bid]]["pred_scores"]], + sorted_heatmaps=[ + all_heatmaps[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ] + ], + query_pos=all_query_pos[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ] + if len(all_query_pos) > 0 + else None, + backbone_features=backbone_features[ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + point_size=self.config.general.visualization_point_size, + ) + else: + self.save_visualizations( + 
target_full_res[bid], + full_res_coords[bid], + [self.preds[file_names[bid]]["pred_masks"]], + [self.preds[file_names[bid]]["pred_classes"]], + file_names[bid], + original_colors[bid], + original_normals[bid], + [self.preds[file_names[bid]]["pred_scores"]], + sorted_heatmaps=[all_heatmaps[bid]], + query_pos=all_query_pos[bid] + if len(all_query_pos) > 0 + else None, + backbone_features=backbone_features, + point_size=self.config.general.visualization_point_size, + ) + + if self.config.general.export: + if self.validation_dataset.dataset_name == "stpls3d": + scan_id, _, _, crop_id = file_names[bid].split("_") + crop_id = int(crop_id.replace(".txt", "")) + file_name = ( + f"{scan_id}_points_GTv3_0{crop_id}_inst_nostuff" + ) + + self.export( + self.preds[file_names[bid]]["pred_masks"], + self.preds[file_names[bid]]["pred_scores"], + self.preds[file_names[bid]]["pred_classes"], + file_name, + self.decoder_id, + ) + else: + self.export( + self.preds[file_names[bid]]["pred_masks"], + self.preds[file_names[bid]]["pred_scores"], + self.preds[file_names[bid]]["pred_classes"], + file_names[bid], + self.decoder_id, + ) + + def eval_instance_epoch_end(self): + log_prefix = f"val" + ap_results = {} + + head_results, tail_results, common_results = [], [], [] + + box_ap_50 = eval_det( + self.bbox_preds, self.bbox_gt, ovthresh=0.5, use_07_metric=False + ) + box_ap_25 = eval_det( + self.bbox_preds, self.bbox_gt, ovthresh=0.25, use_07_metric=False + ) + mean_box_ap_25 = sum([v for k, v in box_ap_25[-1].items()]) / len( + box_ap_25[-1].keys() + ) + mean_box_ap_50 = sum([v for k, v in box_ap_50[-1].items()]) / len( + box_ap_50[-1].keys() + ) + + ap_results[f"{log_prefix}_mean_box_ap_25"] = mean_box_ap_25 + ap_results[f"{log_prefix}_mean_box_ap_50"] = mean_box_ap_50 + + for class_id in box_ap_50[-1].keys(): + class_name = self.train_dataset.label_info[class_id]["name"] + ap_results[f"{log_prefix}_{class_name}_val_box_ap_50"] = box_ap_50[ + -1 + ][class_id] + + for class_id in box_ap_25[-1].keys(): + class_name = self.train_dataset.label_info[class_id]["name"] + ap_results[f"{log_prefix}_{class_name}_val_box_ap_25"] = box_ap_25[ + -1 + ][class_id] + + root_path = f"eval_output" + base_path = f"{root_path}/instance_evaluation_{self.config.general.experiment_name}_{self.current_epoch}" + + if self.validation_dataset.dataset_name in [ + "scannet", + "stpls3d", + "scannet200", + ]: + gt_data_path = f"{self.validation_dataset.data_dir[0]}/instance_gt/{self.validation_dataset.mode}" + else: + gt_data_path = f"{self.validation_dataset.data_dir[0]}/instance_gt/Area_{self.config.general.area}" + + pred_path = f"{base_path}/tmp_output.txt" + + log_prefix = f"val" + + if not os.path.exists(base_path): + os.makedirs(base_path) + + try: + if self.validation_dataset.dataset_name == "s3dis": + new_preds = {} + for key in self.preds.keys(): + new_preds[ + key.replace(f"Area_{self.config.general.area}_", "") + ] = { + "pred_classes": self.preds[key]["pred_classes"] + 1, + "pred_masks": self.preds[key]["pred_masks"], + "pred_scores": self.preds[key]["pred_scores"], + } + mprec, mrec = evaluate( + new_preds, gt_data_path, pred_path, dataset="s3dis" + ) + ap_results[f"{log_prefix}_mean_precision"] = mprec + ap_results[f"{log_prefix}_mean_recall"] = mrec + elif self.validation_dataset.dataset_name == "stpls3d": + new_preds = {} + for key in self.preds.keys(): + new_preds[key.replace(".txt", "")] = { + "pred_classes": self.preds[key]["pred_classes"], + "pred_masks": self.preds[key]["pred_masks"], + "pred_scores": 
self.preds[key]["pred_scores"], + } + + evaluate(new_preds, gt_data_path, pred_path, dataset="stpls3d") + else: + evaluate( + self.preds, + gt_data_path, + pred_path, + dataset=self.validation_dataset.dataset_name, + ) + with open(pred_path, "r") as fin: + for line_id, line in enumerate(fin): + if line_id == 0: + # ignore header + continue + class_name, _, ap, ap_50, ap_25 = line.strip().split(",") + + if self.validation_dataset.dataset_name == "scannet200": + if class_name in VALID_CLASS_IDS_200_VALIDATION: + ap_results[ + f"{log_prefix}_{class_name}_val_ap" + ] = float(ap) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_50" + ] = float(ap_50) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_25" + ] = float(ap_25) + + if class_name in HEAD_CATS_SCANNET_200: + head_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + elif class_name in COMMON_CATS_SCANNET_200: + common_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + elif class_name in TAIL_CATS_SCANNET_200: + tail_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + else: + assert (False, "class not known!") + else: + ap_results[ + f"{log_prefix}_{class_name}_val_ap" + ] = float(ap) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_50" + ] = float(ap_50) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_25" + ] = float(ap_25) + + if self.validation_dataset.dataset_name == "scannet200": + head_results = np.stack(head_results) + common_results = np.stack(common_results) + tail_results = np.stack(tail_results) + + mean_tail_results = np.nanmean(tail_results, axis=0) + mean_common_results = np.nanmean(common_results, axis=0) + mean_head_results = np.nanmean(head_results, axis=0) + + ap_results[ + f"{log_prefix}_mean_tail_ap_25" + ] = mean_tail_results[0] + ap_results[ + f"{log_prefix}_mean_common_ap_25" + ] = mean_common_results[0] + ap_results[ + f"{log_prefix}_mean_head_ap_25" + ] = mean_head_results[0] + + ap_results[ + f"{log_prefix}_mean_tail_ap_50" + ] = mean_tail_results[1] + ap_results[ + f"{log_prefix}_mean_common_ap_50" + ] = mean_common_results[1] + ap_results[ + f"{log_prefix}_mean_head_ap_50" + ] = mean_head_results[1] + + ap_results[ + f"{log_prefix}_mean_tail_ap_25" + ] = mean_tail_results[2] + ap_results[ + f"{log_prefix}_mean_common_ap_25" + ] = mean_common_results[2] + ap_results[ + f"{log_prefix}_mean_head_ap_25" + ] = mean_head_results[2] + + overall_ap_results = np.nanmean( + np.vstack((head_results, common_results, tail_results)), + axis=0, + ) + + ap_results[f"{log_prefix}_mean_ap"] = overall_ap_results[0] + ap_results[f"{log_prefix}_mean_ap_50"] = overall_ap_results[1] + ap_results[f"{log_prefix}_mean_ap_25"] = overall_ap_results[2] + + ap_results = { + key: 0.0 if math.isnan(score) else score + for key, score in ap_results.items() + } + else: + mean_ap = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap") + ] + ) + mean_ap_50 = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap_50") + ] + ) + mean_ap_25 = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap_25") + ] + ) + + ap_results[f"{log_prefix}_mean_ap"] = mean_ap + ap_results[f"{log_prefix}_mean_ap_50"] = mean_ap_50 + ap_results[f"{log_prefix}_mean_ap_25"] = mean_ap_25 + + ap_results = { + key: 0.0 if math.isnan(score) else score + for key, score in ap_results.items() + } + except (IndexError, OSError) as e: + print("NO SCORES!!!") + 
ap_results[f"{log_prefix}_mean_ap"] = 0.0 + ap_results[f"{log_prefix}_mean_ap_50"] = 0.0 + ap_results[f"{log_prefix}_mean_ap_25"] = 0.0 + + self.log_dict(ap_results) + + if not self.config.general.export: + shutil.rmtree(base_path) + + del self.preds + del self.bbox_preds + del self.bbox_gt + + gc.collect() + + self.preds = dict() + self.bbox_preds = dict() + self.bbox_gt = dict() + + def test_epoch_end(self, outputs): + if self.config.general.export: + return + + self.eval_instance_epoch_end() + + dd = defaultdict(list) + for output in outputs: + for key, val in output.items(): # .items() in Python 3. + dd[key].append(val) + + dd = {k: statistics.mean(v) for k, v in dd.items()} + + dd["val_mean_loss_ce"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_ce" in k]] + ) + dd["val_mean_loss_mask"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_mask" in k]] + ) + dd["val_mean_loss_dice"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_dice" in k]] + ) + + self.log_dict(dd) + + def configure_optimizers(self): + optimizer = hydra.utils.instantiate( + self.config.optimizer, params=self.parameters() + ) + if "steps_per_epoch" in self.config.scheduler.scheduler.keys(): + self.config.scheduler.scheduler.steps_per_epoch = len( + self.train_dataloader() + ) + lr_scheduler = hydra.utils.instantiate( + self.config.scheduler.scheduler, optimizer=optimizer + ) + scheduler_config = {"scheduler": lr_scheduler} + scheduler_config.update(self.config.scheduler.pytorch_lightning_params) + return [optimizer], [scheduler_config] + + def prepare_data(self): + self.train_dataset = hydra.utils.instantiate( + self.config.data.train_dataset + ) + self.validation_dataset = hydra.utils.instantiate( + self.config.data.validation_dataset + ) + self.test_dataset = hydra.utils.instantiate( + self.config.data.test_dataset + ) + self.labels_info = self.train_dataset.label_info + + def train_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.train_collation) + return hydra.utils.instantiate( + self.config.data.train_dataloader, + self.train_dataset, + collate_fn=c_fn, + ) + + def val_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.validation_collation) + return hydra.utils.instantiate( + self.config.data.validation_dataloader, + self.validation_dataset, + collate_fn=c_fn, + ) + + def test_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.test_collation) + return hydra.utils.instantiate( + self.config.data.test_dataloader, + self.test_dataset, + collate_fn=c_fn, + ) diff --git a/models/Mask3D/build/lib/mask3d/utils/__init__.py b/models/Mask3D/build/lib/mask3d/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/build/lib/mask3d/utils/gradflow_check.py b/models/Mask3D/build/lib/mask3d/utils/gradflow_check.py new file mode 100644 index 0000000000000000000000000000000000000000..2fedc91592d66d4e5bdef7531daafccc5b5f2e81 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/utils/gradflow_check.py @@ -0,0 +1,62 @@ +""" https://github.com/alwynmathew/gradflow-check """ +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.lines import Line2D + + +def plot_grad_flow(named_parameters): + ave_grads = [] + layers = [] + for n, p in named_parameters: + if (p.requires_grad) and ("bias" not in n): + if p.grad: + layers.append(n) + ave_grads.append(p.grad.abs().mean()) + else: + print(f"{n} - 
doesn't have gradient computed") + + plt.plot(ave_grads, alpha=0.3, color="b") + plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k") + plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical") + plt.xlim(xmin=0, xmax=len(ave_grads)) + plt.xlabel("Layers") + plt.ylabel("average gradient") + plt.title("Gradient flow") + plt.grid(True) + + +def plot_grad_flow_v2(named_parameters): + """Plots the gradients flowing through different layers in the net during training. + Can be used for checking for possible gradient vanishing / exploding problems. + + Usage: Plug this function in Trainer class after loss.backwards() as + "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow""" + ave_grads = [] + max_grads = [] + layers = [] + for n, p in named_parameters: + if (p.requires_grad) and ("bias" not in n): + layers.append(n) + if p.grad: + ave_grads.append(p.grad.abs().mean()) + max_grads.append(p.grad.abs().max()) + else: + print(f"{n} - doesn't have gradient computed") + plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c") + plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b") + plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k") + plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical") + plt.xlim(left=0, right=len(ave_grads)) + plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions + plt.xlabel("Layers") + plt.ylabel("average gradient") + plt.title("Gradient flow") + plt.grid(True) + plt.legend( + [ + Line2D([0], [0], color="c", lw=4), + Line2D([0], [0], color="b", lw=4), + Line2D([0], [0], color="k", lw=4), + ], + ["max-gradient", "mean-gradient", "zero-gradient"], + ) diff --git a/models/Mask3D/build/lib/mask3d/utils/kfold.py b/models/Mask3D/build/lib/mask3d/utils/kfold.py new file mode 100644 index 0000000000000000000000000000000000000000..5bfeba130c890eec35530adeb23f1362041f7cdc --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/utils/kfold.py @@ -0,0 +1,89 @@ +""" Author: https://github.com/yk-szk/stratified_group_kfold """ +import random +import numpy as np + + +class StratifiedGroupKFold: + """ + Stratified Group K-fold with sklearn.model_selection.KFold compabitility. + + Split dataset into k folds with balanced label distribution (stratified) and non-overlapping group. 
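An illustrative usage sketch for this class; the import path is assumed from this file's location, and the data are toy values. The property demonstrated is that samples sharing a group id never end up in both the train and test side of the same fold.

```
from mask3d.utils.kfold import StratifiedGroupKFold  # assumed import path

X = list(range(8))                    # toy sample ids
labels = [0, 0, 1, 1, 0, 1, 0, 1]     # class label per sample
groups = [0, 0, 1, 1, 2, 2, 3, 3]     # group (e.g. scene) id per sample

kfold = StratifiedGroupKFold(n_splits=2, shuffle=True, random_state=42)
for train_idx, test_idx in kfold.split(X, labels, groups):
    train_groups = {groups[i] for i in train_idx}
    test_groups = {groups[i] for i in test_idx}
    assert train_groups.isdisjoint(test_groups)
```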
+ + Args: + n_splits (int): # of splits + shuffle (bool): Shuffle + seed (int): Seed value for random number generator + """ + + def __init__(self, n_splits, shuffle=True, random_state=None): + self.n_splits = n_splits + self.shuffle = shuffle + self.seed = random_state + + def split(self, X, labels, groups): + assert len(X) == len(labels) == len(groups), "Invalid input length" + assert ( + len(set(groups)) >= self.n_splits + ), "The number of groups needs to be larger than n_splits" + + def encode(v): + s = set(v) + d = {l: i for i, l in enumerate(s)} + return [d[e] for e in v] + + labels, groups = encode(labels), encode(groups) + num_labels, num_groups = max(labels) + 1, max(groups) + 1 + label_counts_per_group = np.zeros((num_groups, num_labels), dtype=int) + global_label_dist = np.bincount(labels) + for label, g in zip(labels, groups): + label_counts_per_group[g][label] += 1 + + label_counts_per_fold = np.zeros( + (self.n_splits, num_labels), dtype=int + ) + groups_per_fold = [set() for _ in range(self.n_splits)] + + def eval_label_counts_per_fold(y_counts, fold): + fold += y_counts + std_per_label = ( + np.std(label_counts_per_fold, axis=0) / global_label_dist + ) + fold -= y_counts + return np.mean(std_per_label) + + groups_and_label_counts = list(enumerate(label_counts_per_group)) + if self.shuffle: + rng = random.Random(self.seed) + mean_std = np.mean(np.std(label_counts_per_group, axis=1)) + groups_and_label_counts.sort( + key=lambda g_counts: -np.std(g_counts[1]) + + rng.gauss(0, mean_std) + ) # add rng.gauss to increase the randomness + else: + groups_and_label_counts.sort( + key=lambda g_counts: -np.std(g_counts[1]) + ) + + for g, label_counts in groups_and_label_counts: + evals = [ + eval_label_counts_per_fold( + label_counts, label_counts_per_fold[i] + ) + for i in range(self.n_splits) + ] + best_fold = np.argmin(evals) + label_counts_per_fold[best_fold] += label_counts + groups_per_fold[best_fold].add(g) + + all_groups = set(groups) + for test_groups in groups_per_fold: + train_groups = all_groups - test_groups + + train_indices = [ + i for i, g in enumerate(groups) if g in train_groups + ] + test_indices = [ + i for i, g in enumerate(groups) if g in test_groups + ] + + yield train_indices, test_indices diff --git a/models/Mask3D/build/lib/mask3d/utils/pc_visualizations.py b/models/Mask3D/build/lib/mask3d/utils/pc_visualizations.py new file mode 100644 index 0000000000000000000000000000000000000000..26937b9f293f9cc2b87cc67d3c8742c80f770d60 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/utils/pc_visualizations.py @@ -0,0 +1,202 @@ +from io import BytesIO +from imageio import imread + +import open3d as o3d +from PIL import Image +import numpy as np +import plotly.graph_objects as go +from plotly.subplots import make_subplots +from pandas import DataFrame +import matplotlib +import seaborn as sns +import pyviz3d.visualizer as viz + +matplotlib.use("Agg") +import matplotlib.pyplot as plt + + +def point_cloud_plolty( + coordinates, + label_color, + label_text, + prediction_color, + prediction_text, + normals, +): + def draw_point_cloud(coords, colors=None, label_text=None): + marker = dict(size=1, opacity=0.8) + if colors is not None: + marker.update({"color": colors}) + if (colors is None) and (label_text is not None): + marker.update({"color": label_text}) + fig = go.Scatter3d( + x=coords[:, 0], + y=coords[:, 1], + z=coords[:, 2], + text=label_text, + mode="markers", + marker=marker, + ) + return fig + + fig = make_subplots( + rows=1, + cols=2, + specs=[[{"type": 
"scatter3d"}, {"type": "scatter3d"}]], + ) + fig.add_trace( + draw_point_cloud(coordinates, prediction_color, prediction_text), + row=1, + col=1, + ) + # adding image with prediction + fig.add_trace( + draw_point_cloud(coordinates, label_color, label_text), row=1, col=2 + ) + fig.show() + # data = fig.to_image(width=1080, height=720, format="png") + # image = Image.open(BytesIO(data)) + # return image + + +def point_cloud_pyviz3d( + name, + coordinates, + path, + color=None, + normals=None, + label_color=None, + prediction_color=None, + point_size=25, + voxel_size=0.01, +): + + # because of visualization + coordinates = coordinates * voxel_size + # First, we set up a visualizer + visualizer = viz.Visualizer() + if label_color is not None: + visualizer.add_points( + name=f"{name}_label", + positions=coordinates, + colors=label_color, + point_size=point_size, + visible=False, + ) + + if prediction_color is not None: + visualizer.add_points( + name=f"{name}_prediction", + positions=coordinates, + colors=prediction_color, + point_size=point_size, + visible=False, + ) + + visualizer.add_points( + name=name, + positions=coordinates, + colors=color, + normals=normals, + point_size=point_size, + visible=False, + ) + # When we added everything we need to the visualizer, we save it. + visualizer.save(path, verbose=False) + + +def point_cloud_open3d(coordinates): + points = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(coordinates)) + o3d.visualization.draw_geometries([points]) + + +def _remap_model_output(output, labels): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(labels.keys()): + output_remapped[output == i] = k + return output_remapped + + +def save_visualization( + coordinates, + name="none", + color=None, + normals=None, + target=None, + prediction=None, + target_info=None, + path="./saved", + backend="pyviz3d", + voxel_size=0.05, + color_mean=[0.47793125906962, 0.4303257521323044, 0.3749598901421883], + color_std=[0.2834475483823543, 0.27566157565723015, 0.27018971370874995], +): + target = _remap_model_output(target, target_info) + prediction = _remap_model_output(prediction, target_info) + coordinates = coordinates[:, :3] - coordinates[:, :3].mean(axis=0) + coordinates = coordinates * voxel_size + if color is not None: + color = (color * color_std + color_mean) * 255 + + target_color = np.zeros((len(target), 3)) + target_text = np.full((len(target)), "empty") + prediction_color = np.zeros((len(prediction), 3)) + prediction_text = np.full((len(prediction)), "empty") + if target_info is not None: + for k, v in target_info.items(): + target_color[target == k] = v["color"] + target_text[target == k] = v["name"] + prediction_color[prediction == k] = v["color"] + prediction_text[prediction == k] = v["name"] + if backend == "pyviz3d": + point_cloud_pyviz3d( + name=name, + coordinates=coordinates, + path=path, + color=color, + normals=normals, + label_color=target_color, + prediction_color=prediction_color, + voxel_size=1, + ) + elif backend == "plotly": + point_cloud_plolty( + coordinates=coordinates, + normals=normals, + label_color=target_color, + label_text=target_text, + prediction_color=prediction_color, + prediction_text=prediction_text, + ) + elif backend == "open3d": + point_cloud_open3d(coordinates) + else: + print("No such backend") + + +def draw_confsion_matrix(confusion_matrix, label_db): + index = [i for i in range(confusion_matrix.shape[0])] + index = _remap_model_output(index, label_db) + column_names = np.full((len(index)), "empty") + 
for k, v in label_db.items(): + column_names[index == k] = v["name"] + df_cm = DataFrame( + confusion_matrix, index=column_names, columns=column_names + ) + # pretty_plot_confusion_matrix(df_cm, fz=9) + sns.heatmap( + df_cm, + annot=True, + fmt="d", + linewidths=0.25, + annot_kws={"size": 5}, + vmax=10000, + ) + buf = BytesIO() + plt.savefig(buf, format="jpg") + plt.close() + buf.seek(0) + image = imread(buf, format="jpg") + buf.close() + return image diff --git a/models/Mask3D/build/lib/mask3d/utils/point_cloud_utils.py b/models/Mask3D/build/lib/mask3d/utils/point_cloud_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d2b5ec875da78d299c23afa70531cb0df04e278 --- /dev/null +++ b/models/Mask3D/build/lib/mask3d/utils/point_cloud_utils.py @@ -0,0 +1,83 @@ +from pathlib import Path +from typing import List, Optional, Tuple + +import numpy as np +import open3d +from plyfile import PlyData, PlyElement + + +def load_ply(filepath): + with open(filepath, "rb") as f: + plydata = PlyData.read(f) + data = plydata.elements[0].data + coords = np.array([data["x"], data["y"], data["z"]], dtype=np.float32).T + feats = None + labels = None + if ({"red", "green", "blue"} - set(data.dtype.names)) == set(): + feats = np.array( + [data["red"], data["green"], data["blue"]], dtype=np.uint8 + ).T + if "label" in data.dtype.names: + labels = np.array(data["label"], dtype=np.uint32) + return coords, feats, labels + + +def load_ply_with_normals(filepath): + mesh = open3d.io.read_triangle_mesh(str(filepath)) + if not mesh.has_vertex_normals(): + mesh.compute_vertex_normals() + vertices = np.asarray(mesh.vertices) + normals = np.asarray(mesh.vertex_normals) + + coords, feats, labels = load_ply(filepath) + assert np.allclose(coords, vertices), "different coordinates" + feats = np.hstack((feats, normals)) + + return coords, feats, labels + + +def load_obj_with_normals(filepath): + mesh = open3d.io.read_triangle_mesh(str(filepath)) + if not mesh.has_vertex_normals(): + mesh.compute_vertex_normals() + coords = np.asarray(mesh.vertices) + normals = np.asarray(mesh.vertex_normals) + colors = np.asarray(mesh.vertex_colors) + feats = np.hstack((colors, normals)) + + return coords, feats + + +def write_point_cloud_in_ply( + filepath: Path, + coords: np.ndarray, + feats: Optional[np.ndarray] = None, + labels: Optional[np.ndarray] = None, + dtypes: Optional[List[Tuple[str, str]]] = [ + ("x", "= (3, 8): + from collections.abc import MutableMapping +else: + from collections import MutableMapping + +import torch +from loguru import logger + + +def flatten_dict(d, parent_key="", sep="_"): + """ + https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys + """ + items = [] + for k, v in d.items(): + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, MutableMapping): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def load_baseline_model(cfg, model): + # if it is Minkoski weights + cfg.model.in_channels = 3 + cfg.model.config.conv1_kernel_size = 5 + cfg.data.add_normals = False + cfg.data.train_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.validation_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.test_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.voxel_size = 0.02 + model = model(cfg) + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + model.model.load_state_dict(state_dict) + return cfg, model + + +def 
load_backbone_checkpoint_with_missing_or_exsessive_keys(cfg, model): + state_dict = torch.load(cfg.general.backbone_checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + + # if parametrs not found in checkpoint they will be randomly initialized + for key in state_dict.keys(): + if correct_dict.pop(f"model.backbone.{key}", None) is None: + logger.warning( + f"Key not found, it will be initialized randomly: {key}" + ) + + # if parametrs have different shape, it will randomly initialize + state_dict = torch.load(cfg.general.backbone_checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + for key in correct_dict.keys(): + if key.replace("model.backbone.", "") not in state_dict: + logger.warning(f"{key} not in loaded checkpoint") + state_dict.update( + {key.replace("model.backbone.", ""): correct_dict[key]} + ) + elif ( + state_dict[key.replace("model.backbone.", "")].shape + != correct_dict[key].shape + ): + logger.warning( + f"incorrect shape {key}:{state_dict[key.replace('model.backbone.', '')].shape} vs {correct_dict[key].shape}" + ) + state_dict.update({key: correct_dict[key]}) + + # if we have more keys just discard them + correct_dict = dict(model.state_dict()) + new_state_dict = dict() + for key in state_dict.keys(): + if f"model.backbone.{key}" in correct_dict.keys(): + new_state_dict.update({f"model.backbone.{key}": state_dict[key]}) + elif key in correct_dict.keys(): + new_state_dict.update({key: correct_dict[key]}) + else: + logger.warning(f"excessive key: {key}") + model.load_state_dict(new_state_dict) + return cfg, model + + +def load_checkpoint_with_missing_or_exsessive_keys(cfg, model): + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + + # if parametrs not found in checkpoint they will be randomly initialized + for key in state_dict.keys(): + if correct_dict.pop(key, None) is None: + logger.warning( + f"Key not found, it will be initialized randomly: {key}" + ) + + # if parametrs have different shape, it will randomly initialize + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + for key in correct_dict.keys(): + if key not in state_dict: + logger.warning(f"{key} not in loaded checkpoint") + state_dict.update({key: correct_dict[key]}) + elif state_dict[key].shape != correct_dict[key].shape: + logger.warning( + f"incorrect shape {key}:{state_dict[key].shape} vs {correct_dict[key].shape}" + ) + state_dict.update({key: correct_dict[key]}) + + # if we have more keys just discard them + correct_dict = dict(model.state_dict()) + new_state_dict = dict() + for key in state_dict.keys(): + if key in correct_dict.keys(): + new_state_dict.update({key: state_dict[key]}) + else: + logger.warning(f"excessive key: {key}") + model.load_state_dict(new_state_dict) + return cfg, model + + +def freeze_until(net, param_name: str = None): + """ + Freeze net until param_name + https://opendatascience.slack.com/archives/CGK4KQBHD/p1588373239292300?thread_ts=1588105223.275700&cid=CGK4KQBHD + Args: + net: + param_name: + Returns: + """ + found_name = False + for name, params in net.named_parameters(): + if name == param_name: + found_name = True + params.requires_grad = found_name diff --git a/models/Mask3D/mask3d.egg-info/PKG-INFO b/models/Mask3D/mask3d.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..8bc09c1d9f7d373a6ae88b90feddb4097f838333 --- /dev/null +++ b/models/Mask3D/mask3d.egg-info/PKG-INFO @@ -0,0 +1,11 @@ 
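Stepping back to the checkpoint helpers in `mask3d/utils/utils.py` above: both loaders follow the same tolerant pattern of reconciling a checkpoint state dict with the model's current one (keep the current weights for missing or shape-mismatched keys, and discard keys the model does not have). Below is a condensed sketch of that pattern, not the repository functions themselves; it omits the `model.backbone.` prefix handling of the backbone variant.

```
import torch

def load_tolerant(model, ckpt_path):
    state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
    current = model.state_dict()
    merged = {}
    for key, value in current.items():
        if key not in state_dict:
            merged[key] = value                 # missing in checkpoint: keep current init
        elif state_dict[key].shape != value.shape:
            merged[key] = value                 # shape mismatch: keep current weights
        else:
            merged[key] = state_dict[key]       # otherwise take the checkpoint weight
    # Keys that exist only in the checkpoint are discarded by construction.
    model.load_state_dict(merged)
    return model
```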
+Metadata-Version: 2.1 +Name: mask3d +Version: 0.1 +Summary: UNKNOWN +Home-page: UNKNOWN +License: UNKNOWN +Platform: UNKNOWN +License-File: LICENSE + +UNKNOWN + diff --git a/models/Mask3D/mask3d.egg-info/SOURCES.txt b/models/Mask3D/mask3d.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..d8664a91f3fa541efb77e4f4bb3dd0dde5aadf2d --- /dev/null +++ b/models/Mask3D/mask3d.egg-info/SOURCES.txt @@ -0,0 +1,110 @@ +LICENSE +MANIFEST.in +README.md +setup.py +mask3d/__init__.py +mask3d/main_instance_segmentation.py +mask3d/predict.py +mask3d/preprocess_arkitscenes.py +mask3d.egg-info/PKG-INFO +mask3d.egg-info/SOURCES.txt +mask3d.egg-info/dependency_links.txt +mask3d.egg-info/top_level.txt +mask3d/benchmark/__init__.py +mask3d/benchmark/evaluate_semantic_instance.py +mask3d/benchmark/util.py +mask3d/benchmark/util_3d.py +mask3d/conf/__init__.py +mask3d/conf/config_base_instance_segmentation.yaml +mask3d/conf/augmentation/albumentations_aug.yaml +mask3d/conf/augmentation/volumentations_aug.yaml +mask3d/conf/callbacks/callbacks_instance_segmentation.yaml +mask3d/conf/data/indoor.yaml +mask3d/conf/data/outdoor.yaml +mask3d/conf/data/collation_functions/voxelize_collate.yaml +mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml +mask3d/conf/data/data_loaders/simple_loader.yaml +mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml +mask3d/conf/data/datasets/matterport.yaml +mask3d/conf/data/datasets/matterport_scannet.yaml +mask3d/conf/data/datasets/rio.yaml +mask3d/conf/data/datasets/s3dis.yaml +mask3d/conf/data/datasets/scannet.yaml +mask3d/conf/data/datasets/scannet200.yaml +mask3d/conf/data/datasets/semantic_kitti.yaml +mask3d/conf/data/datasets/stpls3d.yaml +mask3d/conf/logging/base.yaml +mask3d/conf/logging/full.yaml +mask3d/conf/logging/minimal.yaml +mask3d/conf/logging/offline.yaml +mask3d/conf/loss/cross_entropy.yaml +mask3d/conf/loss/set_criterion.yaml +mask3d/conf/loss/set_criterion_custom_weights_1.yaml +mask3d/conf/matcher/hungarian_matcher.yaml +mask3d/conf/metrics/miou.yaml +mask3d/conf/model/mask3d.yaml +mask3d/conf/optimizer/adamw.yaml +mask3d/conf/optimizer/adamw_lower.yaml +mask3d/conf/scheduler/exponentiallr.yaml +mask3d/conf/scheduler/lambdalr.yaml +mask3d/conf/scheduler/onecyclelr.yaml +mask3d/conf/trainer/trainer.yaml +mask3d/conf/trainer/trainer600.yaml +mask3d/datasets/__init__.py +mask3d/datasets/outdoor_semseg.py +mask3d/datasets/random_cuboid.py +mask3d/datasets/semseg.py +mask3d/datasets/utils.py +mask3d/datasets/preprocessing/__init__.py +mask3d/datasets/preprocessing/arkitscenes_preprocessing.py +mask3d/datasets/preprocessing/base_preprocessing.py +mask3d/datasets/preprocessing/s3dis_preprocessing.py +mask3d/datasets/preprocessing/scannet_preprocessing.py +mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py +mask3d/datasets/preprocessing/stpls3d_preprocessing.py +mask3d/datasets/scannet200/__init__.py +mask3d/datasets/scannet200/scannet200_constants.py +mask3d/datasets/scannet200/scannet200_splits.py +mask3d/models/__init__.py +mask3d/models/criterion.py +mask3d/models/mask3d.py +mask3d/models/matcher.py +mask3d/models/misc.py +mask3d/models/model.py +mask3d/models/position_embedding.py +mask3d/models/res16unet.py +mask3d/models/resnet.py +mask3d/models/resunet.py +mask3d/models/wrapper.py +mask3d/models/metrics/__init__.py +mask3d/models/metrics/confusionmatrix.py +mask3d/models/metrics/metrics.py +mask3d/models/modules/3detr_helpers.py +mask3d/models/modules/__init__.py +mask3d/models/modules/common.py 
+mask3d/models/modules/helpers_3detr.py +mask3d/models/modules/resnet_block.py +mask3d/models/modules/senet_block.py +mask3d/trainer/__init__.py +mask3d/trainer/trainer.py +mask3d/utils/__init__.py +mask3d/utils/gradflow_check.py +mask3d/utils/kfold.py +mask3d/utils/pc_visualizations.py +mask3d/utils/point_cloud_utils.py +mask3d/utils/utils.py +mask3d/utils/pointops2/__init__.py +mask3d/utils/pointops2/setup.py +mask3d/utils/pointops2/functions/__init__.py +mask3d/utils/pointops2/functions/pointops.py +mask3d/utils/pointops2/functions/pointops2.py +mask3d/utils/pointops2/functions/pointops_ablation.py +mask3d/utils/pointops2/functions/test_attention_op_step1.py +mask3d/utils/pointops2/functions/test_attention_op_step1_v2.py +mask3d/utils/pointops2/functions/test_attention_op_step2.py +mask3d/utils/pointops2/functions/test_relative_pos_encoding_op_step1.py +mask3d/utils/pointops2/functions/test_relative_pos_encoding_op_step1_v2.py +mask3d/utils/pointops2/functions/test_relative_pos_encoding_op_step1_v3.py +mask3d/utils/pointops2/functions/test_relative_pos_encoding_op_step2.py +mask3d/utils/pointops2/functions/test_relative_pos_encoding_op_step2_v2.py +mask3d/utils/pointops2/src/__init__.py \ No newline at end of file diff --git a/models/Mask3D/mask3d.egg-info/dependency_links.txt b/models/Mask3D/mask3d.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/Mask3D/mask3d.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/models/Mask3D/mask3d.egg-info/top_level.txt b/models/Mask3D/mask3d.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..347620dbc6cab3f22ef5e880a7f4ff468f301c49 --- /dev/null +++ b/models/Mask3D/mask3d.egg-info/top_level.txt @@ -0,0 +1 @@ +mask3d diff --git a/models/Mask3D/mask3d/__init__.py b/models/Mask3D/mask3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b6e21d418d6ee195db3d2b8682476c8fb448cd --- /dev/null +++ b/models/Mask3D/mask3d/__init__.py @@ -0,0 +1,276 @@ +import hydra +import torch +from torch_scatter import scatter_mean + +from mask3d.models.mask3d import Mask3D +from mask3d.utils.utils import ( + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) + +class InstanceSegmentation(torch.nn.Module): + def __init__(self, cfg): + super().__init__() + self.model = hydra.utils.instantiate(cfg.model) + + + def forward(self, x, raw_coordinates=None, point2segment=None): + return self.model(x, raw_coordinates=raw_coordinates, point2segment=point2segment) + + +from omegaconf import OmegaConf, DictConfig +import hydra +from hydra.core.global_hydra import GlobalHydra +from hydra.experimental import initialize, compose + +# imports for input loading +import albumentations as A +import MinkowskiEngine as ME +import numpy as np +import open3d as o3d + +# imports for output +from mask3d.datasets.scannet200.scannet200_constants import (VALID_CLASS_IDS_20, VALID_CLASS_IDS_200, SCANNET_COLOR_MAP_20, SCANNET_COLOR_MAP_200) + +def get_model(checkpoint_path=None, dataset_name = "scannet200"): + + + # Initialize the directory with config files + with initialize(config_path="conf"): + # Compose a configuration + cfg = compose(config_name="config_base_instance_segmentation.yaml") + + cfg.general.checkpoint = checkpoint_path + + # would be nicd to avoid this hardcoding below + # dataset_name = checkpoint_path.split('/')[-1].split('_')[0] 
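For orientation, here is a hedged end-to-end sketch of how the pieces of this file fit together: `get_model` (defined here) plus `prepare_data` and `map_output_to_pointcloud`, which are defined further down. The checkpoint and input paths are examples only, and the exact call signatures should be checked against the definitions below; note that the `__main__` block at the end of this file unpacks `prepare_data` slightly differently.

```
import torch

# Example paths only; adjust to your checkpoint and scene.
model = get_model("checkpoints/scannet200/scannet200_benchmark.ckpt", dataset_name="scannet200")
model.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

(data, points, colors, features, unique_map, inverse_map,
 point2segment, point2segment_full) = prepare_data("data/pcl.ply", device)

with torch.no_grad():
    outputs = model(data, raw_coordinates=features)

masks, scores = map_output_to_pointcloud(outputs, inverse_map, point2segment, point2segment_full)
```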
+ if dataset_name == 'scannet200': + cfg.general.num_targets = 201 + cfg.general.train_mode = False + cfg.general.eval_on_segments = True + cfg.general.topk_per_image = 300 + cfg.general.use_dbscan = True + cfg.general.dbscan_eps = 0.95 + cfg.general.export_threshold = 0.001 + + # # data + cfg.data.num_labels = 200 + cfg.data.test_mode = "test" + + # # model + cfg.model.num_queries = 150 + + if dataset_name == 'scannet': + cfg.general.num_targets = 19 + cfg.general.train_mode = False + cfg.general.eval_on_segments = True + cfg.general.topk_per_image = 300 + cfg.general.use_dbscan = True + cfg.general.dbscan_eps = 0.95 + cfg.general.export_threshold = 0.001 + + # # data + cfg.data.num_labels = 20 + cfg.data.test_mode = "test" + + # # model + cfg.model.num_queries = 150 + + #TODO: this has to be fixed and discussed with Jonas + # cfg.model.scene_min = -3. + # cfg.model.scene_max = 3. + + # # Initialize the Hydra context + # hydra.core.global_hydra.GlobalHydra.instance().clear() + # hydra.initialize(config_path="conf") + + # Load the configuration + # cfg = hydra.compose(config_name="config_base_instance_segmentation.yaml") + model = InstanceSegmentation(cfg) + + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model) + + return model + + +def load_mesh(pcl_file): + + # load point cloud + input_mesh_path = pcl_file + mesh = o3d.io.read_triangle_mesh(input_mesh_path) + return mesh + +def load_ply(path_2_mesh): + pcd = o3d.io.read_point_cloud(path_2_mesh) + return pcd + +def prepare_data(pointcloud_file, device): + # normalization for point cloud features + color_mean = (0.47793125906962, 0.4303257521323044, 0.3749598901421883) + color_std = (0.2834475483823543, 0.27566157565723015, 0.27018971370874995) + normalize_color = A.Normalize(mean=color_mean, std=color_std) + + datatype = None + + if pointcloud_file.split('.')[-1] == 'ply': + try: + mesh = load_mesh(pointcloud_file) + points = np.asarray(mesh.vertices) + colors = np.asarray(mesh.vertex_colors) + colors = colors * 255. 
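A note on the quantization step used a few lines below (and mirrored by `get_full_res_mask` elsewhere in this repo): `ME.utils.sparse_quantize` returns a `unique_map` that picks one point per occupied voxel and an `inverse_map` that broadcasts voxel-level values back to every original point. A toy round trip with made-up data and the same 0.02 voxel size:

```
import numpy as np
import MinkowskiEngine as ME

points = np.random.rand(1000, 3).astype(np.float32)   # toy point cloud
colors = np.random.rand(1000, 3).astype(np.float32)   # toy per-point features

coords = np.floor(points / 0.02)
_, _, unique_map, inverse_map = ME.utils.sparse_quantize(
    coordinates=coords,
    features=colors,
    return_index=True,
    return_inverse=True,
)

voxel_feats = colors[unique_map]            # one row per occupied voxel
full_res_feats = voxel_feats[inverse_map]   # voxel values broadcast back to all points
assert full_res_feats.shape == colors.shape
```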
+ datatype = "mesh" + except: + pcd = load_ply(pointcloud_file) + points = np.asarray(pcd.points) + colors = np.asarray(pcd.colors) + datatype = "point cloud" + + if datatype is None: + print("DATA TYPE IS NOT SUPPORTED!") + exit() + segments = None + elif pointcloud_file.split('.')[-1] == 'npy': + points = np.load(pointcloud_file) + points, colors, normals, segments, labels = ( + points[:, :3], + points[:, 3:6], + points[:, 6:9], + points[:, 9], + points[:, 10:12], + ) + datatype = "mesh" + + else: + print("FORMAT NOT SUPPORTED") + exit() + if datatype == "mesh": + pseudo_image = colors.astype(np.uint8)[np.newaxis, :, :] + colors = np.squeeze(normalize_color(image=pseudo_image)["image"]) + + coords = np.floor(points / 0.02) + _, _, unique_map, inverse_map = ME.utils.sparse_quantize( + coordinates=coords, + features=colors, + return_index=True, + return_inverse=True, + ) + + sample_coordinates = coords[unique_map] + coordinates = [torch.from_numpy(sample_coordinates).int()] + sample_features = colors[unique_map] + features = [torch.from_numpy(sample_features).float()] + + if segments is not None: + point2segment_full = segments + point2segment = segments[unique_map] + point2segment = [torch.from_numpy(point2segment).long()] + point2segment_full = [torch.from_numpy(point2segment_full).long()] + + # Concatenate all lists + input_dict = {"coords": coordinates, "feats": features} + if len(point2segment) > 0: + input_dict["labels"] = point2segment + coordinates, _, point2segment = ME.utils.sparse_collate(**input_dict) + point2segment = point2segment.cuda() + else: + coordinates, _ = ME.utils.sparse_collate(**input_dict) + point2segment = None + point2segment_full = None + else: + point2segment = None + point2segment_full = None + coordinates, _ = ME.utils.sparse_collate(coords=coordinates, feats=features) + + features = torch.cat(features, dim=0) + data = ME.SparseTensor( + coordinates=coordinates, + features=features, + device=device, + ) + return data, points, colors, features, unique_map, inverse_map, point2segment, point2segment_full + + +def map_output_to_pointcloud(outputs, + inverse_map, + point2segment, + point2segment_full): + + # parse predictions + logits = outputs["pred_logits"] + logits = torch.functional.F.softmax(logits, dim=-1)[..., :-1] + masks = outputs["pred_masks"] + # reformat predictions + logits = logits[0] + masks = masks[0] if point2segment is None else masks[0][point2segment] + + num_queries = len(logits) + scores_per_query, topk_indices = logits.flatten(0, 1).topk( + num_queries, sorted=True + ) + + topk_indices = topk_indices // 200 + masks = masks[:, topk_indices] + + result_pred_mask = (masks > 0).float() + heatmap = masks.float().sigmoid() + + mask_scores_per_image = (heatmap * result_pred_mask).sum(0) / ( + result_pred_mask.sum(0) + 1e-6 + ) + score = scores_per_query * mask_scores_per_image + result_pred_mask = get_full_res_mask(result_pred_mask, inverse_map, point2segment_full[0]) if point2segment_full is not None else result_pred_mask[inverse_map] + return (result_pred_mask, score) + +def get_full_res_mask(mask, inverse_map, point2segment_full): + mask = mask.detach().cpu()[inverse_map] # full res + mask = scatter_mean(mask, point2segment_full, dim=0) # full res segments + mask = (mask > 0.5).float() + mask = mask.detach().cpu()[point2segment_full.cpu()] # full res points + return mask + +def save_colorized_mesh(mesh, labels_mapped, output_file, colormap='scannet'): + + # colorize mesh + colors = np.zeros((len(mesh.vertices), 3)) + for li in 
np.unique(labels_mapped): + if colormap == 'scannet': + raise ValueError('Not implemented yet') + elif colormap == 'scannet200': + v_li = VALID_CLASS_IDS_200[int(li)] + colors[(labels_mapped == li)[:, 0], :] = SCANNET_COLOR_MAP_200[v_li] + else: + raise ValueError('Unknown colormap - not supported') + + colors = colors / 255. + mesh.vertex_colors = o3d.utility.Vector3dVector(colors) + o3d.io.write_triangle_mesh(output_file, mesh) + +if __name__ == '__main__': + + model = get_model('checkpoints/scannet200/scannet200_benchmark.ckpt') + model.eval() + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.to(device) + + # load input data + pointcloud_file = 'data/pcl.ply' + mesh = load_mesh(pointcloud_file) + + # prepare data + data, points, colors, features, unique_map, inverse_map = prepare_data(mesh, device) + + # run model + with torch.no_grad(): + outputs = model(data, raw_coordinates=features) + + # map output to point cloud + labels = map_output_to_pointcloud(mesh, outputs, inverse_map) + + # save colorized mesh + save_colorized_mesh(mesh, labels, 'data/pcl_labelled.ply', colormap='scannet200') + \ No newline at end of file diff --git a/models/Mask3D/mask3d/benchmark/__init__.py b/models/Mask3D/mask3d/benchmark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/benchmark/evaluate_semantic_instance.py b/models/Mask3D/mask3d/benchmark/evaluate_semantic_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..242cb87a09b5c69a0d967217a2cd97706197a63d --- /dev/null +++ b/models/Mask3D/mask3d/benchmark/evaluate_semantic_instance.py @@ -0,0 +1,1141 @@ +# Evaluates semantic instance task +# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation +# Input: +# - path to .txt prediction files +# - path to .txt ground truth files +# - output file to write results to +# Each .txt prediction file look like: +# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] +# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] +# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] +# ... +# +# NOTE: The prediction files must live in the root of the given prediction path. +# Predicted mask .txt files must live in a subfolder. +# Additionally, filenames must not contain spaces. +# The relative paths to predicted masks must contain one integer per line, +# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). +# Non-zero integers indicate part of the predicted instance. +# The label ids specify the class of the corresponding mask. +# Confidence is a float confidence score of the mask. +# +# Note that only the valid classes are used for evaluation, +# i.e., any ground truth label not in the valid label set +# is ignored in the evaluation. 
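+# For illustration, a prediction file for a hypothetical scan "scene0011_00"
+# could contain lines such as:
+#   pred_mask/scene0011_00_000.txt 36 0.892
+#   pred_mask/scene0011_00_001.txt 5 0.751
+# where each referenced mask file lists one 0/1 value per mesh vertex.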
+# +# example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] + +# python imports +import math +import os, sys, argparse +import inspect +from copy import deepcopy +from uuid import uuid4 + +import torch + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) + +from scipy import stats + +# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +# parentdir = os.path.dirname(currentdir) +# sys.path.insert(0,parentdir) +import benchmark.util as util +import benchmark.util_3d as util_3d + +# parser = argparse.ArgumentParser() +# parser.add_argument('--gt_path', default='', help='path to directory of gt .txt files') +# parser.add_argument('--output_file', default='', help='output file [default: ./semantic_instance_evaluation.txt]') +# opt = parser.parse_args() + +# if opt.output_file == '': +# opt.output_file = os.path.join(os.getcwd(), 'semantic_instance_evaluation.txt') + + +# ---------- Label info ---------- # +CLASS_LABELS = [ + "cabinet", + "bed", + "chair", + "sofa", + "table", + "door", + "window", + "bookshelf", + "picture", + "counter", + "desk", + "curtain", + "refrigerator", + "shower curtain", + "toilet", + "sink", + "bathtub", + "otherfurniture", +] +VALID_CLASS_IDS = np.array( + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39] +) +ID_TO_LABEL = {} +LABEL_TO_ID = {} +for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] +# ---------- Evaluation params ---------- # +# overlaps for evaluation +opt = {} +opt["overlaps"] = np.append(np.arange(0.5, 0.95, 0.05), 0.25) +# minimum region size for evaluation [verts] +opt["min_region_sizes"] = np.array([100]) # 100 for s3dis, scannet +# distance thresholds [m] +opt["distance_threshes"] = np.array([float("inf")]) +# distance confidences +opt["distance_confs"] = np.array([-float("inf")]) + + +def evaluate_matches(matches): + overlaps = opt["overlaps"] + min_region_sizes = [opt["min_region_sizes"][0]] + dist_threshes = [opt["distance_threshes"][0]] + dist_confs = [opt["distance_confs"][0]] + + # results: class x overlap + ap = np.zeros( + (len(dist_threshes), len(CLASS_LABELS), len(overlaps)), float + ) + for di, (min_region_size, distance_thresh, distance_conf) in enumerate( + zip(min_region_sizes, dist_threshes, dist_confs) + ): + for oi, overlap_th in enumerate(overlaps): + pred_visited = {} + for m in matches: + for p in matches[m]["pred"]: + for label_name in CLASS_LABELS: + for p in matches[m]["pred"][label_name]: + if "uuid" in p: + pred_visited[p["uuid"]] = False + for li, label_name in enumerate(CLASS_LABELS): + y_true = np.empty(0) + y_score = np.empty(0) + hard_false_negatives = 0 + has_gt = False + has_pred = False + for m in matches: + pred_instances = matches[m]["pred"][label_name] + gt_instances = matches[m]["gt"][label_name] + # filter groups in ground truth + gt_instances = [ + gt + for gt in gt_instances + if gt["instance_id"] >= 1000 + and gt["vert_count"] >= min_region_size + and gt["med_dist"] <= distance_thresh + and gt["dist_conf"] >= distance_conf + ] + if gt_instances: + has_gt = True + if pred_instances: + has_pred = True + + cur_true = np.ones(len(gt_instances)) + cur_score = np.ones(len(gt_instances)) * (-float("inf")) + cur_match = np.zeros(len(gt_instances), dtype=bool) + # collect matches + for (gti, gt) in enumerate(gt_instances): + found_match = False + num_pred = len(gt["matched_pred"]) 
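+ # Illustrative note: the greedy matching below uses the usual IoU criterion,
+ # overlap = intersection / (gt verts + pred verts - intersection). For a
+ # hypothetical prediction of 3 vertices sharing 2 vertices with a 3-vertex
+ # ground truth instance, overlap = 2 / (3 + 3 - 2) = 0.5, which passes the
+ # 0.25 threshold but not 0.50, since the comparison is strict (>).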
+ for pred in gt["matched_pred"]: + # greedy assignments + if pred_visited[pred["uuid"]]: + continue + overlap = float(pred["intersection"]) / ( + gt["vert_count"] + + pred["vert_count"] + - pred["intersection"] + ) + if overlap > overlap_th: + confidence = pred["confidence"] + # if already have a prediction for this gt, + # the prediction with the lower score is automatically a false positive + if cur_match[gti]: + max_score = max(cur_score[gti], confidence) + min_score = min(cur_score[gti], confidence) + cur_score[gti] = max_score + # append false positive + cur_true = np.append(cur_true, 0) + cur_score = np.append(cur_score, min_score) + cur_match = np.append(cur_match, True) + # otherwise set score + else: + found_match = True + cur_match[gti] = True + cur_score[gti] = confidence + pred_visited[pred["uuid"]] = True + if not found_match: + hard_false_negatives += 1 + # remove non-matched ground truth instances + cur_true = cur_true[cur_match == True] + cur_score = cur_score[cur_match == True] + + # collect non-matched predictions as false positive + for pred in pred_instances: + found_gt = False + for gt in pred["matched_gt"]: + overlap = float(gt["intersection"]) / ( + gt["vert_count"] + + pred["vert_count"] + - gt["intersection"] + ) + if overlap > overlap_th: + found_gt = True + break + if not found_gt: + num_ignore = pred["void_intersection"] + for gt in pred["matched_gt"]: + # group? + if gt["instance_id"] < 1000: + num_ignore += gt["intersection"] + # small ground truth instances + if ( + gt["vert_count"] < min_region_size + or gt["med_dist"] > distance_thresh + or gt["dist_conf"] < distance_conf + ): + num_ignore += gt["intersection"] + proportion_ignore = ( + float(num_ignore) / pred["vert_count"] + ) + # if not ignored append false positive + if proportion_ignore <= overlap_th: + cur_true = np.append(cur_true, 0) + confidence = pred["confidence"] + cur_score = np.append(cur_score, confidence) + + # append to overall results + y_true = np.append(y_true, cur_true) + y_score = np.append(y_score, cur_score) + + # compute average precision + if has_gt and has_pred: + # compute precision recall curve first + + # sorting and cumsum + score_arg_sort = np.argsort(y_score) + y_score_sorted = y_score[score_arg_sort] + y_true_sorted = y_true[score_arg_sort] + y_true_sorted_cumsum = np.cumsum(y_true_sorted) + + # unique thresholds + (thresholds, unique_indices) = np.unique( + y_score_sorted, return_index=True + ) + num_prec_recall = len(unique_indices) + 1 + + # prepare precision recall + num_examples = len(y_score_sorted) + # https://github.com/ScanNet/ScanNet/pull/26 + # all predictions are non-matched but also all of them are ignored and not counted as FP + # y_true_sorted_cumsum is empty + # num_true_examples = y_true_sorted_cumsum[-1] + num_true_examples = ( + y_true_sorted_cumsum[-1] + if len(y_true_sorted_cumsum) > 0 + else 0 + ) + precision = np.zeros(num_prec_recall) + recall = np.zeros(num_prec_recall) + + # deal with the first point + y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0) + # deal with remaining + for idx_res, idx_scores in enumerate(unique_indices): + cumsum = y_true_sorted_cumsum[idx_scores - 1] + tp = num_true_examples - cumsum + fp = num_examples - idx_scores - tp + fn = cumsum + hard_false_negatives + p = float(tp) / (tp + fp) + r = float(tp) / (tp + fn) + precision[idx_res] = p + recall[idx_res] = r + + # first point in curve is artificial + precision[-1] = 1.0 + recall[-1] = 0.0 + + # compute average of precision-recall curve + recall_for_conv = 
np.copy(recall) + recall_for_conv = np.append( + recall_for_conv[0], recall_for_conv + ) + recall_for_conv = np.append(recall_for_conv, 0.0) + + stepWidths = np.convolve( + recall_for_conv, [-0.5, 0, 0.5], "valid" + ) + # integrate is now simply a dot product + ap_current = np.dot(precision, stepWidths) + + elif has_gt: + ap_current = 0.0 + else: + ap_current = float("nan") + ap[di, li, oi] = ap_current + return ap + + +def compute_averages(aps): + d_inf = 0 + o50 = np.where(np.isclose(opt["overlaps"], 0.5)) + o25 = np.where(np.isclose(opt["overlaps"], 0.25)) + oAllBut25 = np.where(np.logical_not(np.isclose(opt["overlaps"], 0.25))) + avg_dict = {} + # avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) + avg_dict["all_ap"] = np.nanmean(aps[d_inf, :, oAllBut25]) + avg_dict["all_ap_50%"] = np.nanmean(aps[d_inf, :, o50]) + avg_dict["all_ap_25%"] = np.nanmean(aps[d_inf, :, o25]) + avg_dict["classes"] = {} + for (li, label_name) in enumerate(CLASS_LABELS): + avg_dict["classes"][label_name] = {} + # avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) + avg_dict["classes"][label_name]["ap"] = np.average( + aps[d_inf, li, oAllBut25] + ) + avg_dict["classes"][label_name]["ap50%"] = np.average( + aps[d_inf, li, o50] + ) + avg_dict["classes"][label_name]["ap25%"] = np.average( + aps[d_inf, li, o25] + ) + return avg_dict + + +def make_pred_info(pred: dict): + # pred = {'pred_scores' = 100, 'pred_classes' = 100 'pred_masks' = Nx100} + pred_info = {} + assert ( + pred["pred_classes"].shape[0] + == pred["pred_scores"].shape[0] + == pred["pred_masks"].shape[1] + ) + for i in range(len(pred["pred_classes"])): + info = {} + info["label_id"] = pred["pred_classes"][i] + info["conf"] = pred["pred_scores"][i] + info["mask"] = pred["pred_masks"][:, i] + pred_info[uuid4()] = info # we later need to identify these objects + return pred_info + + +def assign_instances_for_scan(pred: dict, gt_file: str): + pred_info = make_pred_info(pred) + try: + gt_ids = util_3d.load_ids(gt_file) + except Exception as e: + util.print_error("unable to load " + gt_file + ": " + str(e)) + + # get gt instances + gt_instances = util_3d.get_instances( + gt_ids, VALID_CLASS_IDS, CLASS_LABELS, ID_TO_LABEL + ) + # associate + gt2pred = deepcopy(gt_instances) + for label in gt2pred: + for gt in gt2pred[label]: + gt["matched_pred"] = [] + pred2gt = {} + for label in CLASS_LABELS: + pred2gt[label] = [] + num_pred_instances = 0 + # mask of void labels in the groundtruth + bool_void = np.logical_not(np.in1d(gt_ids // 1000, VALID_CLASS_IDS)) + # go thru all prediction masks + for uuid in pred_info: + label_id = int(pred_info[uuid]["label_id"]) + conf = pred_info[uuid]["conf"] + if not label_id in ID_TO_LABEL: + continue + label_name = ID_TO_LABEL[label_id] + # read the mask + pred_mask = pred_info[uuid]["mask"] + assert len(pred_mask) == len(gt_ids) + # convert to binary + pred_mask = np.not_equal(pred_mask, 0) + num = np.count_nonzero(pred_mask) + if num < opt["min_region_sizes"][0]: + continue # skip if empty + + pred_instance = {} + pred_instance["uuid"] = uuid + pred_instance["pred_id"] = num_pred_instances + pred_instance["label_id"] = label_id + pred_instance["vert_count"] = num + pred_instance["confidence"] = conf + pred_instance["void_intersection"] = np.count_nonzero( + np.logical_and(bool_void, pred_mask) + ) + + # matched gt instances + matched_gt = [] + # go thru all gt instances with matching label + for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): + intersection = np.count_nonzero( + np.logical_and(gt_ids == 
gt_inst["instance_id"], pred_mask) + ) + if intersection > 0: + gt_copy = gt_inst.copy() + pred_copy = pred_instance.copy() + gt_copy["intersection"] = intersection + pred_copy["intersection"] = intersection + matched_gt.append(gt_copy) + gt2pred[label_name][gt_num]["matched_pred"].append(pred_copy) + pred_instance["matched_gt"] = matched_gt + num_pred_instances += 1 + pred2gt[label_name].append(pred_instance) + + return gt2pred, pred2gt + + +def print_results(avgs): + sep = "" + col1 = ":" + lineLen = 64 + + print("") + print("#" * lineLen) + line = "" + line += "{:<15}".format("what") + sep + col1 + line += "{:>15}".format("AP") + sep + line += "{:>15}".format("AP_50%") + sep + line += "{:>15}".format("AP_25%") + sep + print(line) + print("#" * lineLen) + + for (li, label_name) in enumerate(CLASS_LABELS): + ap_avg = avgs["classes"][label_name]["ap"] + ap_50o = avgs["classes"][label_name]["ap50%"] + ap_25o = avgs["classes"][label_name]["ap25%"] + line = "{:<15}".format(label_name) + sep + col1 + line += sep + "{:>15.3f}".format(ap_avg) + sep + line += sep + "{:>15.3f}".format(ap_50o) + sep + line += sep + "{:>15.3f}".format(ap_25o) + sep + print(line) + + all_ap_avg = avgs["all_ap"] + all_ap_50o = avgs["all_ap_50%"] + all_ap_25o = avgs["all_ap_25%"] + + print("-" * lineLen) + line = "{:<15}".format("average") + sep + col1 + line += "{:>15.3f}".format(all_ap_avg) + sep + line += "{:>15.3f}".format(all_ap_50o) + sep + line += "{:>15.3f}".format(all_ap_25o) + sep + print(line) + print("") + + +def write_result_file(avgs, filename): + _SPLITTER = "," + with open(filename, "w") as f: + f.write( + _SPLITTER.join(["class", "class id", "ap", "ap50", "ap25"]) + "\n" + ) + for i in range(len(VALID_CLASS_IDS)): + class_name = CLASS_LABELS[i] + class_id = VALID_CLASS_IDS[i] + ap = avgs["classes"][class_name]["ap"] + ap50 = avgs["classes"][class_name]["ap50%"] + ap25 = avgs["classes"][class_name]["ap25%"] + f.write( + _SPLITTER.join( + [str(x) for x in [class_name, class_id, ap, ap50, ap25]] + ) + + "\n" + ) + + +def evaluate( + preds: dict, gt_path: str, output_file: str, dataset: str = "scannet" +): + global CLASS_LABELS + global VALID_CLASS_IDS + global ID_TO_LABEL + global LABEL_TO_ID + global opt + + if dataset == "stpls3d": + # global CLASS_LABELS + # global VALID_CLASS_IDS + # global ID_TO_LABEL + # global LABEL_TO_ID + + opt["min_region_sizes"] = np.array([10]) + + CLASS_LABELS = [ + "Build", + "LowVeg", + "MediumVeg", + "HighVeg", + "Vehicle", + "Truck", + "Aircraft", + "MilitaryVeh", + "Bike", + "Motorcycle", + "LightPole", + "StreetSign", + "Clutter", + "Fence", + ] + VALID_CLASS_IDS = np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + ) + + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + if dataset == "s3dis": + # global CLASS_LABELS + # global VALID_CLASS_IDS + # global ID_TO_LABEL + # global LABEL_TO_ID + + CLASS_LABELS = [ + "ceiling", + "floor", + "wall", + "beam", + "column", + "window", + "door", + "table", + "chair", + "sofa", + "bookcase", + "board", + "clutter", + ] + VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]) + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + if dataset == "scannet200": + CLASS_LABELS = ( + "chair", + "table", + "door", + "couch", + "cabinet", + "shelf", + 
"desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "bicycle", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "candle", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "guitar case", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", + "shower floor", + "water pitcher", + "mailbox", + "bowl", + "paper bag", + "alarm clock", + "music stand", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "cd case", + "closet rod", + "coffee kettle", + "structure", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "storage organizer", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "luggage", + "mattress", + ) + + VALID_CLASS_IDS = np.array( + ( + 2, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, + 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 121, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, 
+ 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 221, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 286, + 300, + 304, + 312, + 323, + 325, + 331, + 342, + 356, + 370, + 392, + 395, + 399, + 408, + 417, + 488, + 540, + 562, + 570, + 572, + 581, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1174, + 1175, + 1176, + 1178, + 1179, + 1180, + 1181, + 1182, + 1183, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1190, + 1191, + ) + ) + + ID_TO_LABEL = {} + LABEL_TO_ID = {} + for i in range(len(VALID_CLASS_IDS)): + LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] + ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] + + total_true = 0 + total_seen = 0 + NUM_CLASSES = len(VALID_CLASS_IDS) + + true_positive_classes = np.zeros(NUM_CLASSES) + positive_classes = np.zeros(NUM_CLASSES) + gt_classes = np.zeros(NUM_CLASSES) + + # precision & recall + total_gt_ins = np.zeros(NUM_CLASSES) + at = 0.5 + tpsins = [[] for _ in range(NUM_CLASSES)] + fpsins = [[] for _ in range(NUM_CLASSES)] + # mucov and mwcov + all_mean_cov = [[] for _ in range(NUM_CLASSES)] + all_mean_weighted_cov = [[] for _ in range(NUM_CLASSES)] + + print("evaluating", len(preds), "scans...") + matches = {} + for i, (k, v) in enumerate(preds.items()): + gt_file = os.path.join(gt_path, k + ".txt") + if not os.path.isfile(gt_file): + util.print_error( + "Scan {} does not match any gt file".format(k), user_fault=True + ) + + if dataset == "s3dis": + gt_ids = util_3d.load_ids(gt_file) + gt_sem = (gt_ids // 1000) - 1 + gt_ins = gt_ids - (gt_ids // 1000) * 1000 + + # pred_sem = v['pred_classes'] - 1 + pred_sem = np.zeros(v["pred_masks"].shape[0], dtype=np.int) + # TODO CONTINUE HERE!!!!!!!!!!!!! + pred_ins = np.zeros(v["pred_masks"].shape[0], dtype=np.int) + + for inst_id in reversed(range(v["pred_masks"].shape[1])): + point_ids = np.argwhere(v["pred_masks"][:, inst_id] == 1.0)[ + :, 0 + ] + pred_ins[point_ids] = inst_id + 1 + pred_sem[point_ids] = v["pred_classes"][inst_id] - 1 + + # semantic acc + total_true += np.sum(pred_sem == gt_sem) + total_seen += pred_sem.shape[0] + + # TODO PARALLELIZ THIS!!!!!!! 
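+ # Note: the per-point loop kept in the triple-quoted string below is the
+ # reference implementation; the vectorized np.unique(..., return_counts=True)
+ # updates that follow accumulate the same per-class counts without a Python loop.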
+ # pn semantic mIoU + """ + for j in range(gt_sem.shape[0]): + gt_l = int(gt_sem[j]) + pred_l = int(pred_sem[j]) + gt_classes[gt_l] += 1 + positive_classes[pred_l] += 1 + true_positive_classes[gt_l] += int(gt_l == pred_l) + """ + + uniq, counts = np.unique(pred_sem, return_counts=True) + positive_classes[uniq] += counts + + uniq, counts = np.unique(gt_sem, return_counts=True) + gt_classes[uniq] += counts + + uniq, counts = np.unique( + gt_sem[pred_sem == gt_sem], return_counts=True + ) + true_positive_classes[uniq] += counts + + # instance + un = np.unique(pred_ins) + pts_in_pred = [[] for _ in range(NUM_CLASSES)] + for ig, g in enumerate(un): # each object in prediction + if g == -1: + continue + tmp = pred_ins == g + sem_seg_i = int(stats.mode(pred_sem[tmp])[0]) + pts_in_pred[sem_seg_i] += [tmp] + + un = np.unique(gt_ins) + pts_in_gt = [[] for _ in range(NUM_CLASSES)] + for ig, g in enumerate(un): + tmp = gt_ins == g + sem_seg_i = int(stats.mode(gt_sem[tmp])[0]) + pts_in_gt[sem_seg_i] += [tmp] + + # instance mucov & mwcov + for i_sem in range(NUM_CLASSES): + sum_cov = 0 + mean_cov = 0 + mean_weighted_cov = 0 + num_gt_point = 0 + for ig, ins_gt in enumerate(pts_in_gt[i_sem]): + ovmax = 0.0 + num_ins_gt_point = np.sum(ins_gt) + num_gt_point += num_ins_gt_point + for ip, ins_pred in enumerate(pts_in_pred[i_sem]): + union = ins_pred | ins_gt + intersect = ins_pred & ins_gt + iou = float(np.sum(intersect)) / np.sum(union) + + if iou > ovmax: + ovmax = iou + ipmax = ip + + sum_cov += ovmax + mean_weighted_cov += ovmax * num_ins_gt_point + + if len(pts_in_gt[i_sem]) != 0: + mean_cov = sum_cov / len(pts_in_gt[i_sem]) + all_mean_cov[i_sem].append(mean_cov) + + mean_weighted_cov /= num_gt_point + all_mean_weighted_cov[i_sem].append(mean_weighted_cov) + + if dataset == "s3dis": + # instance precision & recall + for i_sem in range(NUM_CLASSES): + tp = [0.0] * len(pts_in_pred[i_sem]) + fp = [0.0] * len(pts_in_pred[i_sem]) + gtflag = np.zeros(len(pts_in_gt[i_sem])) + total_gt_ins[i_sem] += len(pts_in_gt[i_sem]) + + for ip, ins_pred in enumerate(pts_in_pred[i_sem]): + ovmax = -1.0 + + for ig, ins_gt in enumerate(pts_in_gt[i_sem]): + union = ins_pred | ins_gt + intersect = ins_pred & ins_gt + iou = float(np.sum(intersect)) / np.sum(union) + + if iou > ovmax: + ovmax = iou + igmax = ig + + if ovmax >= at: + tp[ip] = 1 # true + else: + fp[ip] = 1 # false positive + + tpsins[i_sem] += tp + fpsins[i_sem] += fp + + matches_key = os.path.abspath(gt_file) + # assign gt to predictions + gt2pred, pred2gt = assign_instances_for_scan(v, gt_file) + matches[matches_key] = {} + matches[matches_key]["gt"] = gt2pred + matches[matches_key]["pred"] = pred2gt + sys.stdout.write("\rscans processed: {}".format(i + 1)) + sys.stdout.flush() + print("") + ap_scores = evaluate_matches(matches) + avgs = compute_averages(ap_scores) + + # print + print_results(avgs) + write_result_file(avgs, output_file) + + if dataset == "s3dis": + MUCov = np.zeros(NUM_CLASSES) + MWCov = np.zeros(NUM_CLASSES) + for i_sem in range(NUM_CLASSES): + MUCov[i_sem] = np.mean(all_mean_cov[i_sem]) + MWCov[i_sem] = np.mean(all_mean_weighted_cov[i_sem]) + + precision = np.zeros(NUM_CLASSES) + recall = np.zeros(NUM_CLASSES) + for i_sem in range(NUM_CLASSES): + tp = np.asarray(tpsins[i_sem]).astype(np.float) + fp = np.asarray(fpsins[i_sem]).astype(np.float) + tp = np.sum(tp) + fp = np.sum(fp) + rec = tp / total_gt_ins[i_sem] + prec = tp / (tp + fp) + + precision[i_sem] = prec + recall[i_sem] = rec + + """ + LOG_FOUT = open(os.path.join('results_a5.txt'), 
'w') + + def log_string(out_str): + LOG_FOUT.write(out_str + '\n') + LOG_FOUT.flush() + print(out_str) + """ + + return np.mean(precision), np.mean(recall) + + +# TODO: remove this +# import pandas as pd +# def main(): +# print("!!! CLI is only for debugging purposes. use `evaluate()` instead.") +# evaluate(pd.read_pickle("/globalwork/schult/saved_predictions.pkl"), opt.gt_path, opt.output_file) + +# if __name__ == '__main__': +# main() diff --git a/models/Mask3D/mask3d/benchmark/util.py b/models/Mask3D/mask3d/benchmark/util.py new file mode 100644 index 0000000000000000000000000000000000000000..9a4224cd4f785c8a5a7cde490cf0f9999e61dbe7 --- /dev/null +++ b/models/Mask3D/mask3d/benchmark/util.py @@ -0,0 +1,128 @@ +import os, sys +import csv + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) +try: + import imageio +except: + print("Please install the module 'imageio' for image processing, e.g.") + print("pip install imageio") + sys.exit(-1) + +# print an error message and quit +def print_error(message, user_fault=False): + sys.stderr.write("ERROR: " + str(message) + "\n") + if user_fault: + sys.exit(2) + sys.exit(-1) + + +# if string s represents an int +def represents_int(s): + try: + int(s) + return True + except ValueError: + return False + + +def read_label_mapping( + filename, label_from="raw_category", label_to="nyu40id" +): + assert os.path.isfile(filename) + mapping = dict() + with open(filename) as csvfile: + reader = csv.DictReader(csvfile, delimiter="\t") + for row in reader: + mapping[row[label_from]] = int(row[label_to]) + # if ints convert + if represents_int(list(mapping.keys())[0]): + mapping = {int(k): v for k, v in mapping.items()} + return mapping + + +# input: scene_types.txt or scene_types_all.txt +def read_scene_types_mapping(filename, remove_spaces=True): + assert os.path.isfile(filename) + mapping = dict() + lines = open(filename).read().splitlines() + lines = [line.split("\t") for line in lines] + if remove_spaces: + mapping = {x[1].strip(): int(x[0]) for x in lines} + else: + mapping = {x[1]: int(x[0]) for x in lines} + return mapping + + +# color by label +def visualize_label_image(filename, image): + height = image.shape[0] + width = image.shape[1] + vis_image = np.zeros([height, width, 3], dtype=np.uint8) + color_palette = create_color_palette() + for idx, color in enumerate(color_palette): + vis_image[image == idx] = color + imageio.imwrite(filename, vis_image) + + +# color by different instances (mod length of color palette) +def visualize_instance_image(filename, image): + height = image.shape[0] + width = image.shape[1] + vis_image = np.zeros([height, width, 3], dtype=np.uint8) + color_palette = create_color_palette() + instances = np.unique(image) + for idx, inst in enumerate(instances): + vis_image[image == inst] = color_palette[inst % len(color_palette)] + imageio.imwrite(filename, vis_image) + + +# color palette for nyu40 labels +def create_color_palette(): + return [ + (0, 0, 0), + (174, 199, 232), # wall + (152, 223, 138), # floor + (31, 119, 180), # cabinet + (255, 187, 120), # bed + (188, 189, 34), # chair + (140, 86, 75), # sofa + (255, 152, 150), # table + (214, 39, 40), # door + (197, 176, 213), # window + (148, 103, 189), # bookshelf + (196, 156, 148), # picture + (23, 190, 207), # counter + (178, 76, 76), + (247, 182, 210), # desk + (66, 188, 102), + (219, 219, 141), # curtain + (140, 57, 197), + (202, 185, 52), + (51, 176, 203), + (200, 54, 131), + (92, 193, 61), + (78, 71, 183), + (172, 114, 82), + 
(255, 127, 14), # refrigerator + (91, 163, 138), + (153, 98, 156), + (140, 153, 101), + (158, 218, 229), # shower curtain + (100, 125, 154), + (178, 127, 135), + (120, 185, 128), + (146, 111, 194), + (44, 160, 44), # toilet + (112, 128, 144), # sink + (96, 207, 209), + (227, 119, 194), # bathtub + (213, 92, 176), + (94, 106, 211), + (82, 84, 163), # otherfurn + (100, 85, 144), + ] diff --git a/models/Mask3D/mask3d/benchmark/util_3d.py b/models/Mask3D/mask3d/benchmark/util_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..572064f3ca251563466ca6bfbe2c70dacdad205f --- /dev/null +++ b/models/Mask3D/mask3d/benchmark/util_3d.py @@ -0,0 +1,177 @@ +import os, sys +import json + +try: + import numpy as np +except: + print("Failed to import numpy package.") + sys.exit(-1) + +try: + from plyfile import PlyData, PlyElement +except: + print("Please install the module 'plyfile' for PLY i/o, e.g.") + print("pip install plyfile") + sys.exit(-1) + +import benchmark.util as util + + +# matrix: 4x4 np array +# points Nx3 np array +def transform_points(matrix, points): + assert len(points.shape) == 2 and points.shape[1] == 3 + num_points = points.shape[0] + p = np.concatenate([points, np.ones((num_points, 1))], axis=1) + p = np.matmul(matrix, np.transpose(p)) + p = np.transpose(p) + p[:, :3] /= p[:, 3, None] + return p[:, :3] + + +def export_ids(filename, ids): + with open(filename, "w") as f: + for id in ids: + f.write("%d\n" % id) + + +def load_ids(filename): + ids = open(filename).read().splitlines() + ids = np.array(ids, dtype=np.int64) + return ids + + +def read_mesh_vertices(filename): + assert os.path.isfile(filename) + with open(filename, "rb") as f: + plydata = PlyData.read(f) + num_verts = plydata["vertex"].count + vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) + vertices[:, 0] = plydata["vertex"].data["x"] + vertices[:, 1] = plydata["vertex"].data["y"] + vertices[:, 2] = plydata["vertex"].data["z"] + return vertices + + +# export 3d instance labels for instance evaluation +def export_instance_ids_for_eval(filename, label_ids, instance_ids): + assert label_ids.shape[0] == instance_ids.shape[0] + output_mask_path_relative = "pred_mask" + name = os.path.splitext(os.path.basename(filename))[0] + output_mask_path = os.path.join( + os.path.dirname(filename), output_mask_path_relative + ) + if not os.path.isdir(output_mask_path): + os.mkdir(output_mask_path) + insts = np.unique(instance_ids) + zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) + with open(filename, "w") as f: + for idx, inst_id in enumerate(insts): + if inst_id == 0: # 0 -> no instance for this vertex + continue + output_mask_file = os.path.join( + output_mask_path_relative, name + "_" + str(idx) + ".txt" + ) + loc = np.where(instance_ids == inst_id) + label_id = label_ids[loc[0][0]] + f.write("%s %d %f\n" % (output_mask_file, label_id, 1.0)) + # write mask + mask = np.copy(zero_mask) + mask[loc[0]] = 1 + export_ids(output_mask_file, mask) + + +# ------------ Instance Utils ------------ # + + +class Instance(object): + instance_id = 0 + label_id = 0 + vert_count = 0 + med_dist = -1 + dist_conf = 0.0 + + def __init__(self, mesh_vert_instances, instance_id): + if instance_id == -1: + return + self.instance_id = int(instance_id) + self.label_id = int(self.get_label_id(instance_id)) + self.vert_count = int( + self.get_instance_verts(mesh_vert_instances, instance_id) + ) + + def get_label_id(self, instance_id): + return int(instance_id // 1000) + + def get_instance_verts(self, 
mesh_vert_instances, instance_id): + return (mesh_vert_instances == instance_id).sum() + + def to_json(self): + return json.dumps( + self, default=lambda o: o.__dict__, sort_keys=True, indent=4 + ) + + def to_dict(self): + dict = {} + dict["instance_id"] = self.instance_id + dict["label_id"] = self.label_id + dict["vert_count"] = self.vert_count + dict["med_dist"] = self.med_dist + dict["dist_conf"] = self.dist_conf + return dict + + def from_json(self, data): + self.instance_id = int(data["instance_id"]) + self.label_id = int(data["label_id"]) + self.vert_count = int(data["vert_count"]) + if "med_dist" in data: + self.med_dist = float(data["med_dist"]) + self.dist_conf = float(data["dist_conf"]) + + def __str__(self): + return "(" + str(self.instance_id) + ")" + + +def read_instance_prediction_file(filename, pred_path): + lines = open(filename).read().splitlines() + instance_info = {} + abs_pred_path = os.path.abspath(pred_path) + for line in lines: + parts = line.split(" ") + if len(parts) != 3: + util.print_error( + "invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]" + ) + if os.path.isabs(parts[0]): + util.print_error( + "invalid instance prediction file. First entry in line must be a relative path" + ) + mask_file = os.path.join(os.path.dirname(filename), parts[0]) + mask_file = os.path.abspath(mask_file) + # check that mask_file lives inside prediction path + if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: + util.print_error( + "predicted mask {} in prediction text file {} points outside of prediction path.".format( + mask_file, filename + ) + ) + + info = {} + info["label_id"] = int(float(parts[1])) + info["conf"] = float(parts[2]) + instance_info[mask_file] = info + return instance_info + + +def get_instances(ids, class_ids, class_labels, id2label): + instances = {} + for label in class_labels: + instances[label] = [] + instance_ids = np.unique(ids) + for id in instance_ids: + if id == 0: + continue + inst = Instance(ids, id) + if inst.label_id in class_ids: + instances[id2label[inst.label_id]].append(inst.to_dict()) + return instances diff --git a/models/Mask3D/mask3d/conf/__init__.py b/models/Mask3D/mask3d/conf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/conf/augmentation/albumentations_aug.yaml b/models/Mask3D/mask3d/conf/augmentation/albumentations_aug.yaml new file mode 100644 index 0000000000000000000000000000000000000000..006663b4be251bf0f41ac2f66f855ae3d59a2878 --- /dev/null +++ b/models/Mask3D/mask3d/conf/augmentation/albumentations_aug.yaml @@ -0,0 +1,30 @@ +__version__: 0.4.5 +transform: + __class_fullname__: albumentations.core.composition.Compose + additional_targets: {} + bbox_params: null + keypoint_params: null + p: 1.0 + transforms: + - __class_fullname__: albumentations.augmentations.transforms.RandomBrightnessContrast + always_apply: true + brightness_by_max: true + brightness_limit: + - -0.2 + - 0.2 + contrast_limit: + - -0.2 + - 0.2 + p: 0.5 + - __class_fullname__: albumentations.augmentations.transforms.RGBShift + always_apply: true + b_shift_limit: + - -20 + - 20 + g_shift_limit: + - -20 + - 20 + p: 0.5 + r_shift_limit: + - -20 + - 20 diff --git a/models/Mask3D/mask3d/conf/augmentation/volumentations_aug.yaml b/models/Mask3D/mask3d/conf/augmentation/volumentations_aug.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..3b86407a2e735ad8dbba79f83746ceb79722aedf --- /dev/null +++ b/models/Mask3D/mask3d/conf/augmentation/volumentations_aug.yaml @@ -0,0 +1,53 @@ +# pi = 3.14159265358979 +# pi/2 = 1.57079632679489 +# pi/3 = 1.04719755119659 +# pi/6 = 0.52359877559829 +# pi/12 = 0.26179938779914 +# pi/24 = 0.13089969389957 +# +__version__: 0.1.6 +transform: + __class_fullname__: volumentations.core.composition.Compose + additional_targets: {} + p: 1.0 + transforms: + - __class_fullname__: volumentations.augmentations.transforms.Scale3d + always_apply: true + p: 0.5 + scale_limit: + - - -0.1 + - 0.1 + - - -0.1 + - 0.1 + - - -0.1 + - 0.1 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 0 + - 0 + - 1 + p: 0.5 + rotation_limit: + - -3.141592653589793 + - 3.141592653589793 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 0 + - 1 + - 0 + p: 0.5 + rotation_limit: + - -0.13089969389957 + - 0.13089969389957 + - __class_fullname__: volumentations.augmentations.transforms.RotateAroundAxis3d + always_apply: true + axis: + - 1 + - 0 + - 0 + p: 0.5 + rotation_limit: + - -0.13089969389957 + - 0.13089969389957 diff --git a/models/Mask3D/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml b/models/Mask3D/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7f0958eed35ea4317ddc3f2378dd66336472c0fa --- /dev/null +++ b/models/Mask3D/mask3d/conf/callbacks/callbacks_instance_segmentation.yaml @@ -0,0 +1,11 @@ +# @package _group_ +- _target_: pytorch_lightning.callbacks.ModelCheckpoint + monitor: val_mean_ap_50 + save_last: true + save_top_k: 1 + mode: max + dirpath: ${general.save_dir} + filename: "{epoch}-{val_mean_ap_50:.3f}" + every_n_epochs: 1 + +- _target_: pytorch_lightning.callbacks.LearningRateMonitor diff --git a/models/Mask3D/mask3d/conf/config_base_instance_segmentation.yaml b/models/Mask3D/mask3d/conf/config_base_instance_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61aeae0519bd308a58293d07ee902beb6a64ed5d --- /dev/null +++ b/models/Mask3D/mask3d/conf/config_base_instance_segmentation.yaml @@ -0,0 +1,75 @@ +general: + train_mode: true + task: "instance_segmentation" + seed: null + checkpoint: null + backbone_checkpoint: null + freeze_backbone: false # train only last layer + linear_probing_backbone: false + train_on_segments: false + eval_on_segments: false + filter_out_instances: false + save_visualizations: false + visualization_point_size: 20 + decoder_id: -1 + export: false + use_dbscan: false + ignore_class_threshold: 100 + project_name: scannet + workspace: jonasschult + experiment_name: DEBUG_ABLATION + num_targets: 19 + add_instance: true + dbscan_eps: 0.95 + dbscan_min_points: 1 + + + export_threshold: 0.0001 + + reps_per_epoch: 1 + + on_crops: false + + scores_threshold: 0.0 + iou_threshold: 1.0 + + area: 5 + + eval_inner_core: -1 # disabled + + topk_per_image: 100 + + ignore_mask_idx: [] + + max_batch_size: 99999999 + + save_dir: saved/${general.experiment_name} + # time/commit/md5(config)_uuid + # time/experiment_id/version_uuid + # experiment_id: 1 # commit[:8], or unique from logger + # version: 1 # md5[:8] of config + + gpus: 1 + +defaults: + - data: indoor + - data/data_loaders: simple_loader + - data/datasets: scannet + - data/collation_functions: voxelize_collate + - logging: full + - model: mask3d + - 
metrics: miou + - optimizer: adamw + - scheduler: onecyclelr + - trainer: trainer600 + - callbacks: callbacks_instance_segmentation + - matcher: hungarian_matcher + - loss: set_criterion + +hydra: + run: + dir: saved/hydra_logs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: saved/hydra_logs/${now:%Y-%m-%d}/${now:%H-%M-%S} + # dir: ${general.save_dir} + subdir: ${hydra.job.num}_${hydra.job.id} diff --git a/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate.yaml b/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..026552efb024e4e6fd90bf6bda9df283da2bf4c1 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate.yaml @@ -0,0 +1,42 @@ +# @package data + +train_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.train_mode} + small_crops: false + very_small_crops: false + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.train_dataset.filter_out_classes} + label_offset: ${data.train_dataset.label_offset} + num_queries: ${model.num_queries} + +validation_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.validation_mode} + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.validation_dataset.filter_out_classes} + label_offset: ${data.validation_dataset.label_offset} + num_queries: ${model.num_queries} + +test_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.test_mode} + batch_instance: false + probing: ${general.linear_probing_backbone} + task: ${general.task} + ignore_class_threshold: ${general.ignore_class_threshold} + filter_out_classes: ${data.test_dataset.filter_out_classes} + label_offset: ${data.test_dataset.label_offset} + num_queries: ${model.num_queries} \ No newline at end of file diff --git a/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml b/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5d3471d143ddfe999d8f3031e41ba6efce2e879 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/collation_functions/voxelize_collate_merge.yaml @@ -0,0 +1,36 @@ +# @package data + +train_collation: + _target_: mask3d.datasets.utils.VoxelizeCollateMerge + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.train_mode} + small_crops: false + very_small_crops: false + scenes: 2 + batch_instance: false + make_one_pc_noise: false + place_nearby: false + place_far: false + proba: 1 + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} + +validation_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: ${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.validation_mode} + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} + +test_collation: + _target_: mask3d.datasets.utils.VoxelizeCollate + ignore_label: 
${data.ignore_label} + voxel_size: ${data.voxel_size} + mode: ${data.test_mode} + probing: ${general.linear_probing_backbone} + include_ignore: ${general.include_ignore} + task: ${general.task} diff --git a/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader.yaml b/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39996e14d769c2ba9341da582a1f7bf970fc7925 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader.yaml @@ -0,0 +1,22 @@ +# @package data + +train_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: true + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.batch_size} + +validation_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.test_batch_size} + +test_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.test_batch_size} diff --git a/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml b/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1b1b45d13167dc07357a13feb5a513dd71c9a2e --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/data_loaders/simple_loader_save_memory.yaml @@ -0,0 +1,22 @@ +# @package data + +train_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: true + pin_memory: ${data.pin_memory} + num_workers: ${data.num_workers} + batch_size: ${data.batch_size} + +validation_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: 1 + batch_size: ${data.test_batch_size} + +test_dataloader: + _target_: torch.utils.data.DataLoader + shuffle: false + pin_memory: ${data.pin_memory} + num_workers: 1 + batch_size: ${data.test_batch_size} diff --git a/models/Mask3D/mask3d/conf/data/datasets/matterport.yaml b/models/Mask3D/mask3d/conf/data/datasets/matterport.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6728ab9eb26bc78f435237d9d7d61800b900735d --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/matterport.yaml @@ -0,0 +1,48 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/matterport + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: 
${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/matterport + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/matterport/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/mask3d/conf/data/datasets/matterport_scannet.yaml b/models/Mask3D/mask3d/conf/data/datasets/matterport_scannet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df259ceaadfa68a90c2b8a60d7b74a958b30c79d --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/matterport_scannet.yaml @@ -0,0 +1,50 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: + - data/processed/scannet + - data/processed/matterport + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/mask3d/conf/data/datasets/rio.yaml b/models/Mask3D/mask3d/conf/data/datasets/rio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1adfea36fea05b14a7fa95382677aee6144d1b4b --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/rio.yaml @@ -0,0 +1,48 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: mix3d/conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + label_db_filepath: 
data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +validation_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + +test_dataset: + _target_: mix3d.datasets.semseg.SemanticSegmentationDataset + data_dir: data/processed/rio + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} diff --git a/models/Mask3D/mask3d/conf/data/datasets/s3dis.yaml b/models/Mask3D/mask3d/conf/data/datasets/s3dis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e1385416655514397d82737e1edc2d1a5997657 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/s3dis.yaml @@ -0,0 +1,87 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: data/processed/s3dis + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: False + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: data/processed/s3dis + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: 
${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "s3dis" + data_dir: data/processed/s3dis + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/s3dis/label_database.yaml + color_mean_std: data/processed/s3dis/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + filter_out_classes: [] + label_offset: 0 diff --git a/models/Mask3D/mask3d/conf/data/datasets/scannet.yaml b/models/Mask3D/mask3d/conf/data/datasets/scannet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50f1c6c5998d8f3c6dae35ef508225dff4b0271f --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/scannet.yaml @@ -0,0 +1,79 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: false + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + add_unlabeled_pc: false + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet" + data_dir: data/processed/scannet + image_augmentations_path: 
null + volume_augmentations_path: null + label_db_filepath: data/processed/scannet/label_database.yaml + color_mean_std: data/processed/scannet/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 1] + label_offset: 2 diff --git a/models/Mask3D/mask3d/conf/data/datasets/scannet200.yaml b/models/Mask3D/mask3d/conf/data/datasets/scannet200.yaml new file mode 100644 index 0000000000000000000000000000000000000000..730a6ab9f1965004ec9828d1e8b2429005bef6f2 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/scannet200.yaml @@ -0,0 +1,79 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: false + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + add_unlabeled_pc: false + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: null + volume_augmentations_path: null + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "scannet200" + data_dir: /home/weders/scratch/scratch/scannetter/arkit/raw/ + image_augmentations_path: null + volume_augmentations_path: null + # label_db_filepath: data/processed/scannet200/label_database.yaml + # color_mean_std: data/processed/scannet200/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cropping: 
false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + filter_out_classes: [0, 2] + label_offset: 2 diff --git a/models/Mask3D/mask3d/conf/data/datasets/semantic_kitti.yaml b/models/Mask3D/mask3d/conf/data/datasets/semantic_kitti.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9540ad610bd4a68d64369519d20e13009df9feda --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/semantic_kitti.yaml @@ -0,0 +1,42 @@ +# @package data +train_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.train_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: mix3d/conf/augmentation/volumentations_aug.yaml + +validation_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.validation_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: null + +test_dataset: + _target_: mix3d.datasets.outdoor_semseg.LidarDataset + data_dir: data/processed/semantic_kitti + label_db_filepath: data/processed/semantic_kitti/label_database.yaml + mode: ${data.test_mode} + add_reflection: ${data.add_reflection} + add_distance: ${data.add_distance} + add_instance: ${data.add_instance} + num_labels: ${data.num_labels} + sweep: ${data.sweep} + data_percent: 1.0 + ignore_label: ${data.ignore_label} + volume_augmentations_path: null diff --git a/models/Mask3D/mask3d/conf/data/datasets/stpls3d.yaml b/models/Mask3D/mask3d/conf/data/datasets/stpls3d.yaml new file mode 100644 index 0000000000000000000000000000000000000000..913667d4123a7edead9d948358ae25cf9f7b4bb1 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/datasets/stpls3d.yaml @@ -0,0 +1,95 @@ +# @package data +train_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: conf/augmentation/albumentations_aug.yaml + volume_augmentations_path: conf/augmentation/volumentations_aug.yaml + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.train_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + # different augs experiments + instance_oversampling: 0.0 + place_around_existing: False + point_per_cut: 0 + max_cut_region: 0 + flip_in_center: false + noise_rate: 0 + resample_points: 0 + cropping: ${data.cropping} + cropping_args: ${data.cropping_args} + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + reps_per_epoch: ${general.reps_per_epoch} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + 
label_offset: 1 + is_elastic_distortion: true + color_drop: 0.0 + +validation_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.validation_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + on_crops: ${general.on_crops} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + label_offset: 1 + +test_dataset: + _target_: mask3d.datasets.semseg.SemanticSegmentationDataset + dataset_name: "stpls3d" + data_dir: data/processed/stpls3d + image_augmentations_path: null + volume_augmentations_path: null + label_db_filepath: data/processed/stpls3d/label_database.yaml + color_mean_std: data/processed/stpls3d/color_mean_std.yaml + data_percent: 1.0 + mode: ${data.test_mode} + ignore_label: ${data.ignore_label} + num_labels: ${data.num_labels} + add_raw_coordinates: ${data.add_raw_coordinates} + add_colors: ${data.add_colors} + add_normals: ${data.add_normals} + add_instance: ${data.add_instance} + cache_data: ${data.cache_data} + cropping: false + is_tta: false + crop_min_size: ${data.crop_min_size} + crop_length: ${data.crop_length} + cropping_v1: ${data.cropping_v1} + area: ${general.area} + on_crops: ${general.on_crops} + eval_inner_core: ${general.eval_inner_core} + filter_out_classes: [0] + label_offset: 1 diff --git a/models/Mask3D/mask3d/conf/data/indoor.yaml b/models/Mask3D/mask3d/conf/data/indoor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..868c37ccfe901f14396b68a38eac47b42cb3e812 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/indoor.yaml @@ -0,0 +1,43 @@ +# @package _group_ + +# these parameters are inherited by datasets, data_loaders and collators +# but they might be overwritten + +# splits +train_mode: train +validation_mode: validation +test_mode: validation # test # validation + +# dataset +ignore_label: 255 +add_raw_coordinates: true # 3dim +add_colors: true # 3dim +add_normals: false # 3dim +in_channels: 3 # in_channels = 3 * (add_normals + add_colors + add_raw_coordinates) +num_labels: 20 +# num_labels: 41 +add_instance: ${general.add_instance} +task: ${general.task} + +# data loader +pin_memory: false +num_workers: 4 +batch_size: 5 +test_batch_size: 1 +cache_data: false + +# collation +voxel_size: 0.02 + +reps_per_epoch: ${general.reps_per_epoch} + +cropping: false +cropping_args: + min_points: 30000 + aspect: 0.8 + min_crop: 0.5 + max_crop: 1.0 + +crop_min_size: 20000 +crop_length: 6.0 +cropping_v1: true \ No newline at end of file diff --git a/models/Mask3D/mask3d/conf/data/outdoor.yaml b/models/Mask3D/mask3d/conf/data/outdoor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a77474f62d1cfb53f130160f641c65cb81a62956 --- /dev/null +++ b/models/Mask3D/mask3d/conf/data/outdoor.yaml @@ -0,0 +1,26 @@ +# @package _group_ + +# these parameters are inherited by datasets, data_loaders and collators +# but they might be overwritten + +# 
splits +train_mode: train +validation_mode: validation +test_mode: validation + +# dataset +ignore_label: 255 +add_distance: true # 1dim +add_reflection: true # 1dim +in_channels: 2 # in_channels = add_distance + add_reflection +num_labels: 19 +add_instance: false + +# data loader +pin_memory: true +num_workers: 4 +batch_size: 18 +sweep: 1 + +# collation +voxel_size: 0.15 diff --git a/models/Mask3D/mask3d/conf/logging/base.yaml b/models/Mask3D/mask3d/conf/logging/base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3d700a101ddf3d1e2c1a3cdea08190afff762a5b --- /dev/null +++ b/models/Mask3D/mask3d/conf/logging/base.yaml @@ -0,0 +1,10 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.NeptuneLogger + project_name: ${general.workspace}/${general.project_name} + experiment_name: ${general.experiment_name} + offline_mode: false + +- _target_: pytorch_lightning.loggers.CSVLogger + save_dir: ${general.save_dir} + name: ${general.experiment_id} + version: ${general.version} diff --git a/models/Mask3D/mask3d/conf/logging/full.yaml b/models/Mask3D/mask3d/conf/logging/full.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b434e94dc1f0889cf0829b5f89b8509717a3546c --- /dev/null +++ b/models/Mask3D/mask3d/conf/logging/full.yaml @@ -0,0 +1,8 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.WandbLogger + project: ${general.project_name} + name: ${general.experiment_name} + save_dir: ${general.save_dir} + entity: "schult" + resume: "allow" + id: ${general.experiment_name} diff --git a/models/Mask3D/mask3d/conf/logging/minimal.yaml b/models/Mask3D/mask3d/conf/logging/minimal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1c46e26fefedcec50d4fdc9fc77c187d60cf7b9 --- /dev/null +++ b/models/Mask3D/mask3d/conf/logging/minimal.yaml @@ -0,0 +1,5 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.CSVLogger + save_dir: ${general.save_dir} + name: ${general.experiment_id} + version: ${general.version} diff --git a/models/Mask3D/mask3d/conf/logging/offline.yaml b/models/Mask3D/mask3d/conf/logging/offline.yaml new file mode 100644 index 0000000000000000000000000000000000000000..914ad19142ca22c3778be709208323908460ebac --- /dev/null +++ b/models/Mask3D/mask3d/conf/logging/offline.yaml @@ -0,0 +1,10 @@ +# @package _group_ +- _target_: pytorch_lightning.loggers.TensorBoardLogger + name: ${general.experiment_id} + version: ${general.version} + save_dir: ${general.save_dir} + +- _target_: pytorch_lightning.loggers.CSVLogger + name: ${general.experiment_id} + version: ${general.version} + save_dir: ${general.save_dir} \ No newline at end of file diff --git a/models/Mask3D/mask3d/conf/loss/cross_entropy.yaml b/models/Mask3D/mask3d/conf/loss/cross_entropy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c000f40ad2ab40605c244e38243a6e0cc7933768 --- /dev/null +++ b/models/Mask3D/mask3d/conf/loss/cross_entropy.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.nn.CrossEntropyLoss +ignore_index: ${data.ignore_label} diff --git a/models/Mask3D/mask3d/conf/loss/set_criterion.yaml b/models/Mask3D/mask3d/conf/loss/set_criterion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c04ba49ce1823c2d6e923a03ae0514490d463e9 --- /dev/null +++ b/models/Mask3D/mask3d/conf/loss/set_criterion.yaml @@ -0,0 +1,11 @@ +# @package _group_ +_target_: mask3d.models.criterion.SetCriterion +num_classes: ${general.num_targets} +eos_coef: 0.1 +losses: + - "labels" + - "masks" +num_points: 
${matcher.num_points} +oversample_ratio: 3.0 +importance_sample_ratio: 0.75 +class_weights: -1 diff --git a/models/Mask3D/mask3d/conf/loss/set_criterion_custom_weights_1.yaml b/models/Mask3D/mask3d/conf/loss/set_criterion_custom_weights_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d2c308e081c1ffa61beb13308b27e6ff753f0f4 --- /dev/null +++ b/models/Mask3D/mask3d/conf/loss/set_criterion_custom_weights_1.yaml @@ -0,0 +1,11 @@ +# @package _group_ +_target_: mask3d.models.criterion.SetCriterion +num_classes: ${general.num_targets} +eos_coef: 0.1 +losses: + - "labels" + - "masks" +num_points: ${matcher.num_points} +oversample_ratio: 3.0 +importance_sample_ratio: 0.75 +class_weights: [1.0,1.5,10.0,1.0,1.0,1.0,1.0,1.0,10.0,10.0,1.0,10.0,1.0,1.0] diff --git a/models/Mask3D/mask3d/conf/matcher/hungarian_matcher.yaml b/models/Mask3D/mask3d/conf/matcher/hungarian_matcher.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47750b20906b6b40a131b702ba360e36ee4c8380 --- /dev/null +++ b/models/Mask3D/mask3d/conf/matcher/hungarian_matcher.yaml @@ -0,0 +1,6 @@ +# @package _group_ +_target_: mask3d.models.matcher.HungarianMatcher +cost_class: 2. +cost_mask: 5. +cost_dice: 2. +num_points: -1 diff --git a/models/Mask3D/mask3d/conf/metrics/miou.yaml b/models/Mask3D/mask3d/conf/metrics/miou.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68d1b61181d9615d7d6d7638261d119a4fc47074 --- /dev/null +++ b/models/Mask3D/mask3d/conf/metrics/miou.yaml @@ -0,0 +1,4 @@ +# @package _group_ +_target_: mask3d.models.metrics.ConfusionMatrix +num_classes: ${data.num_labels} +ignore_label: ${data.ignore_label} diff --git a/models/Mask3D/mask3d/conf/model/mask3d.yaml b/models/Mask3D/mask3d/conf/model/mask3d.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95718d8710477650561e0ddd845688f50c868032 --- /dev/null +++ b/models/Mask3D/mask3d/conf/model/mask3d.yaml @@ -0,0 +1,47 @@ +# @package _group_ +_target_: mask3d.models.Mask3D + +# transformer parameters +hidden_dim: 128 +dim_feedforward: 1024 +num_queries: 100 +num_heads: 8 +num_decoders: 3 +dropout: 0.0 +pre_norm: false +use_level_embed: false +normalize_pos_enc: true +positional_encoding_type: "fourier" +gauss_scale: 1.0 +hlevels: [0,1,2,3] + +# queries +non_parametric_queries: true +random_query_both: false +random_normal: false +random_queries: false +use_np_features: false + +# sampling +sample_sizes: [200, 800, 3200, 12800, 51200] +max_sample_size: false # change false means sampling activated + +shared_decoder: true +num_classes: ${general.num_targets} +train_on_segments: ${general.train_on_segments} +scatter_type: "mean" + +voxel_size: ${data.voxel_size} + +config: + backbone: + _target_: mask3d.models.Res16UNet34C + config: + dialations: [ 1, 1, 1, 1 ] + conv1_kernel_size: 5 + bn_momentum: 0.02 + # depends on normals, color, raw_coordinates + # varies from 3 to 9 + in_channels: ${data.in_channels} + out_channels: ${data.num_labels} + out_fpn: true diff --git a/models/Mask3D/mask3d/conf/optimizer/adamw.yaml b/models/Mask3D/mask3d/conf/optimizer/adamw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b4020d1ddd1444c94ea5bfbe1281c485fca587e --- /dev/null +++ b/models/Mask3D/mask3d/conf/optimizer/adamw.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.optim.AdamW +lr: 0.0001 \ No newline at end of file diff --git a/models/Mask3D/mask3d/conf/optimizer/adamw_lower.yaml b/models/Mask3D/mask3d/conf/optimizer/adamw_lower.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..7e42f091a0d5dd03b66ab1dcec8b81d78a692af9 --- /dev/null +++ b/models/Mask3D/mask3d/conf/optimizer/adamw_lower.yaml @@ -0,0 +1,3 @@ +# @package _group_ +_target_: torch.optim.AdamW +lr: 0.005 diff --git a/models/Mask3D/mask3d/conf/scheduler/exponentiallr.yaml b/models/Mask3D/mask3d/conf/scheduler/exponentiallr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc5224083670b286d75fda46304560dbcca3aecb --- /dev/null +++ b/models/Mask3D/mask3d/conf/scheduler/exponentiallr.yaml @@ -0,0 +1,11 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.ExponentialLR + gamma: 0.99999 + last_epoch: -1 # ${trainer.max_epochs} + # need to set to number because of tensorboard logger + # steps_per_epoch: -1 + +pytorch_lightning_params: + interval: step diff --git a/models/Mask3D/mask3d/conf/scheduler/lambdalr.yaml b/models/Mask3D/mask3d/conf/scheduler/lambdalr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b63f6f4333e98931ce22f1a38829de0ef51a3719 --- /dev/null +++ b/models/Mask3D/mask3d/conf/scheduler/lambdalr.yaml @@ -0,0 +1,8 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.StepLR + step_size: 99999 + +pytorch_lightning_params: + interval: epoch diff --git a/models/Mask3D/mask3d/conf/scheduler/onecyclelr.yaml b/models/Mask3D/mask3d/conf/scheduler/onecyclelr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c788877193d7366c21088cf9fefb77e4f62ef4d9 --- /dev/null +++ b/models/Mask3D/mask3d/conf/scheduler/onecyclelr.yaml @@ -0,0 +1,11 @@ +# @package _group_ + +scheduler: + _target_: torch.optim.lr_scheduler.OneCycleLR + max_lr: ${optimizer.lr} + epochs: ${trainer.max_epochs} + # need to set to number because of tensorboard logger + steps_per_epoch: -1 + +pytorch_lightning_params: + interval: step diff --git a/models/Mask3D/mask3d/conf/trainer/trainer.yaml b/models/Mask3D/mask3d/conf/trainer/trainer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f436300f9ca6bbbe96ca6c1b4c7e8eeffe35fabd --- /dev/null +++ b/models/Mask3D/mask3d/conf/trainer/trainer.yaml @@ -0,0 +1,7 @@ +# @package _group_ +deterministic: false +max_epochs: 1000 +min_epochs: 1 +resume_from_checkpoint: null +check_val_every_n_epoch: 50 +num_sanity_val_steps: -1 diff --git a/models/Mask3D/mask3d/conf/trainer/trainer600.yaml b/models/Mask3D/mask3d/conf/trainer/trainer600.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc9f00295aafe3431d1c0e7ca50dbc29559ea134 --- /dev/null +++ b/models/Mask3D/mask3d/conf/trainer/trainer600.yaml @@ -0,0 +1,7 @@ +# @package _group_ +deterministic: false +max_epochs: 601 +min_epochs: 1 +resume_from_checkpoint: null +check_val_every_n_epoch: 50 +num_sanity_val_steps: 2 diff --git a/models/Mask3D/mask3d/datasets/__init__.py b/models/Mask3D/mask3d/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/datasets/outdoor_semseg.py b/models/Mask3D/mask3d/datasets/outdoor_semseg.py new file mode 100644 index 0000000000000000000000000000000000000000..4592a6eda45c1a7626530eb19c42c267496749df --- /dev/null +++ b/models/Mask3D/mask3d/datasets/outdoor_semseg.py @@ -0,0 +1,206 @@ +import logging +from pathlib import Path +from typing import List, Optional, Union, Tuple +from random import random + +import numpy as np +import volumentations as V +import yaml +from torch.utils.data import Dataset + +logger = 
logging.getLogger(__name__)
+
+
+class LidarDataset(Dataset):
+    def __init__(
+        self,
+        data_dir: Optional[
+            Union[str, Tuple[str]]
+        ] = "data/processed/semantic_kitti",
+        label_db_filepath: Optional[
+            str
+        ] = "./data/processed/semantic_kitti/label_database.yaml",
+        mode: Optional[str] = "train",
+        add_reflection: Optional[bool] = True,
+        add_distance: Optional[bool] = False,
+        add_instance: Optional[bool] = True,
+        num_labels: Optional[int] = -1,
+        data_percent: Optional[float] = 1.0,
+        ignore_label: Optional[Union[int, List[int]]] = 255,
+        volume_augmentations_path: Optional[str] = None,
+        sweep: Optional[int] = 1,
+    ):
+        self.mode = mode
+        self.data_dir = data_dir
+        if type(data_dir) == str:
+            self.data_dir = [self.data_dir]
+        self.ignore_label = ignore_label
+        self.add_instance = add_instance
+        self.add_distance = add_distance
+        self.add_reflection = add_reflection
+
+        # loading database files
+        self._data = []
+        for database_path in self.data_dir:
+            database_path = Path(database_path)
+            if not (database_path / f"{mode}_database.yaml").exists():
+                print(f"generate {database_path}/{mode}_database.yaml first")
+                exit()
+            self._data.extend(
+                self._load_yaml(database_path / f"{mode}_database.yaml")
+            )
+
+        labels = self._load_yaml(Path(label_db_filepath))
+        self._labels = self._select_correct_labels(labels, num_labels)
+
+        # augmentations
+        self.volume_augmentations = V.NoOp()
+        if volume_augmentations_path is not None:
+            self.volume_augmentations = V.load(
+                volume_augmentations_path, data_format="yaml"
+            )
+
+        # reformulating in sweeps
+        data = [[]]
+        last_scene = self._data[0]["scene"]
+        for x in self._data:
+            if x["scene"] == last_scene:
+                data[-1].append(x)
+            else:
+                last_scene = x["scene"]
+                data.append([x])
+        for i in range(len(data)):
+            data[i] = list(self.chunks(data[i], sweep))
+        self._data = [val for sublist in data for val in sublist]
+
+        if data_percent < 1.0:
+            self._data = self._data[: int(len(self._data) * data_percent)]
+
+    @staticmethod
+    def chunks(lst, n):
+        """Yield successive n-sized chunks from lst."""
+        for i in range(0, len(lst), n):
+            yield lst[i : i + n]
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx: int):
+        points = []
+        for sweep in self.data[idx]:
+            points.append(np.load(sweep["filepath"]))
+            # rotate
+            points[-1][:, :3] = (
+                points[-1][:, :3] @ np.array(sweep["pose"])[:3, :3]
+            )
+            # translate
+            points[-1][:, :3] += np.array(sweep["pose"])[:3, 3]
+        points = np.vstack(points)
+
+        coordinates, features, labels = (
+            points[:, :3],
+            points[:, 3:-2],
+            points[:, -2:],
+        )
+
+        if not self.add_reflection:
+            # no reflectance channel: fall back to a single constant feature per point
+            features = np.ones((len(coordinates), 1))
+
+        if self.add_distance:
+            center_coordinate = coordinates.mean(0)
+            features = np.hstack(
+                (
+                    features,
+                    np.linalg.norm(coordinates - center_coordinate, axis=1)[
+                        :, np.newaxis
+                    ],
+                )
+            )
+
+        # volume and image augmentations for train
+        if "train" in self.mode:
+            coordinates -= coordinates.mean(0)
+            if 0.5 > random():
+                coordinates += (
+                    np.random.uniform(coordinates.min(0), coordinates.max(0))
+                    / 2
+                )
+            aug = self.volume_augmentations(
+                points=coordinates,
+                features=features,
+                labels=labels,
+            )
+            coordinates, features, labels = (
+                aug["points"],
+                aug["features"],
+                aug["labels"],
+            )
+
+        # prepare labels and map from 0 to 20(40)
+        labels = labels.astype(np.int32)
+        if labels.size > 0:
+            labels[:, 0] = self._remap_from_zero(labels[:, 0])
+        if not self.add_instance:
+            # taking only first column, which is segmentation label, not instance
+            labels = labels[:,
0].flatten() + + return coordinates, features, labels + + @property + def data(self): + """database file containing information about preproscessed dataset""" + return self._data + + @property + def label_info(self): + """database file containing information labels used by dataset""" + return self._labels + + @staticmethod + def _load_yaml(filepath): + with open(filepath) as f: + file = yaml.safe_load(f) + return file + + def _select_correct_labels(self, labels, num_labels): + number_of_validation_labels = 0 + number_of_all_labels = 0 + for ( + k, + v, + ) in labels.items(): + number_of_all_labels += 1 + if v["validation"]: + number_of_validation_labels += 1 + + if num_labels == number_of_all_labels: + return labels + elif num_labels == number_of_validation_labels: + valid_labels = dict() + for ( + k, + v, + ) in labels.items(): + if v["validation"]: + valid_labels.update({k: v}) + return valid_labels + else: + msg = f"""not available number labels, select from: + {number_of_validation_labels}, {number_of_all_labels}""" + raise ValueError(msg) + + def _remap_from_zero(self, labels): + labels[ + ~np.isin(labels, list(self.label_info.keys())) + ] = self.ignore_label + # remap to the range from 0 + for i, k in enumerate(self.label_info.keys()): + labels[labels == k] = i + return labels + + def _remap_model_output(self, output): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(self.label_info.keys()): + output_remapped[output == i] = k + return output_remapped diff --git a/models/Mask3D/mask3d/datasets/preprocessing/__init__.py b/models/Mask3D/mask3d/datasets/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..2f222dc27e73eedab1e1d82b14c1573ce632af7c --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/arkitscenes_preprocessing.py @@ -0,0 +1,116 @@ +import re +from pathlib import Path +import numpy as np +import pandas as pd +from fire import Fire +from natsort import natsorted +from loguru import logger +import os + +from datasets.preprocessing.base_preprocessing import BasePreprocessing +from utils.point_cloud_utils import load_ply_with_normals + +from datasets.scannet200.scannet200_constants import ( + VALID_CLASS_IDS_200, + SCANNET_COLOR_MAP_200, + CLASS_LABELS_200, +) + + +class ARKitScenesPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "/home/weders/scratch/scratch/scannetter/arkit/raw", + save_dir: str = "/home/weders/scratch/scratch/scannetter/arkit/raw", + modes: tuple = ('Validation', ), + n_jobs: int = 1, + git_repo: str = "./data/raw/scannet/ScanNet", + mesh_file: str="mesh_tsdf.ply", + scannet200: bool = False, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.scannet200 = scannet200 + git_repo = Path(git_repo) + for mode in self.modes: + scenes = os.listdir(os.path.join(data_dir, mode)) + scans_folder = "scans_test" if mode == "test" else "scans" + filepaths = [] + for scene in scenes: + if os.path.exists(os.path.join(data_dir, mode, scene, mesh_file)): + filepaths.append( + self.data_dir + / mode + / scene + / mesh_file) + self.files[mode] = natsorted(filepaths) + + def process_file(self, filepath, mode): + """process_file. 
+ + Please note, that for obtaining segmentation labels ply files were used. + + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + scene = int(filepath.parent.name) + print(scene) + filebase = { + "filepath": filepath, + "scene": scene, + "sub_scene": scene, + "raw_filepath": str(filepath), + "file_len": -1, + } + # reading both files and checking that they are fitting + coords, features, _ = load_ply_with_normals(filepath) + file_len = len(coords) + filebase["file_len"] = file_len + points = np.hstack((coords, features)) + + print(features.shape) + + points = np.concatenate((points, np.zeros((file_len, 4))), axis=1) # adding segment and label fake columns + + processed_filepath = ( + self.save_dir / mode / f"data_mask3d.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + return filebase + + @logger.catch + def fix_bugs_in_labels(self): + if not self.scannet200: + logger.add(self.save_dir / "fixed_bugs_in_labels.log") + found_wrong_labels = { + tuple([270, 0]): 50, + tuple([270, 2]): 50, + tuple([384, 0]): 149, + } + for scene, wrong_label in found_wrong_labels.items(): + scene, sub_scene = scene + bug_file = ( + self.save_dir / "train" / f"{scene:04}_{sub_scene:02}.npy" + ) + points = np.load(bug_file) + bug_mask = points[:, -1] != wrong_label + points = points[bug_mask] + np.save(bug_file, points) + logger.info(f"Fixed {bug_file}") + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + print(scene_match) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(ARKitScenesPreprocessing) \ No newline at end of file diff --git a/models/Mask3D/mask3d/datasets/preprocessing/base_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/base_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..a17fd4f89aca0d16d27b1bd10c9f40b3e40a6e61 --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/base_preprocessing.py @@ -0,0 +1,204 @@ +import os +import sys +import re +import yaml +import json +import multiprocessing +from pathlib import Path +from hashlib import md5 + +import numpy as np +from fire import Fire +from tqdm import tqdm +from joblib import Parallel, delayed +from loguru import logger + + +class BasePreprocessing: + def __init__( + self, + data_dir: str = "./data/raw/", + save_dir: str = "./data/processed/", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + ): + self.data_dir = Path(data_dir) + self.save_dir = Path(save_dir) + self.n_jobs = n_jobs + self.modes = modes + + if not self.data_dir.exists(): + logger.error("data folder doesn't exist") + raise FileNotFoundError + if self.save_dir.exists() is False: + self.save_dir.mkdir(parents=True, exist_ok=True) + + self.files = {} + for data_type in self.modes: + self.files.update({data_type: []}) + + @logger.catch + def preprocess(self): + self.n_jobs = ( + multiprocessing.cpu_count() if self.n_jobs == -1 else self.n_jobs + ) + for mode in self.modes: + database = [] + logger.info(f"Tasks for {mode}: {len(self.files[mode])}") + parallel_results = Parallel(n_jobs=self.n_jobs, verbose=10)( + delayed(self.process_file)(file, mode) + for file in self.files[mode] + ) + for filebase in parallel_results: + database.append(filebase) + 
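+            # each process_file call returns a per-file metadata dict ("filebase");
+            # the collected list is written to <save_dir>/<mode>_database.yaml by save_database below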
self.save_database(database, mode) + # self.fix_bugs_in_labels() + # self.joint_database() + # self.compute_color_mean_std( + # train_database_path=(self.save_dir / "train_database.yaml") + # ) + + def preprocess_sequential(self): + for mode in self.modes: + database = [] + for filepath in tqdm(self.files[mode], unit="file"): + filebase = self.process_file(filepath, mode) + database.append(filebase) + self.save_database(database, mode) + self.fix_bugs_in_labels() + self.joint_database() + self.compute_color_mean_std( + train_database_path=(self.save_dir / "train_database.yaml") + ) + + def process_file(self, filepath, mode): + """process_file. + + Args: + filepath: path to the main file + mode: typically train, test or validation + + Returns: + filebase: info about file + """ + raise NotImplementedError + + def make_instance_database_sequential( + self, + train_database_path: str = "./data/processed/train_database.yaml", + mode="instance", + ): + train_database = self._load_yaml(train_database_path) + instance_database = [] + for sample in tqdm(train_database): + instance_database.append(self.extract_instance_from_file(sample)) + self.save_database(instance_database, mode=mode) + + @logger.catch + def make_instance_database( + self, + train_database_path: str = "./data/processed/train_database.yaml", + mode="instance", + ): + self.n_jobs = ( + multiprocessing.cpu_count() if self.n_jobs == -1 else self.n_jobs + ) + train_database = self._load_yaml(train_database_path) + instance_database = [] + logger.info(f"Files in database: {len(train_database)}") + parallel_results = Parallel(n_jobs=self.n_jobs, verbose=10)( + delayed(self.extract_instance_from_file)(sample) + for sample in train_database + ) + for filebase in parallel_results: + instance_database.append(filebase) + self.save_database(instance_database, mode=mode) + + def extract_instance_from_file(self, sample_from_database): + points = np.load(sample_from_database["filepath"]) + labels = points[:, -2:] + file_instances = [] + for instance_id in np.unique(labels[:, 1]): + occupied_indices = np.isin(labels[:, 1], instance_id) + instance_points = points[occupied_indices].copy() + instance_classes = ( + np.unique(instance_points[:, 9]).astype(int).tolist() + ) + + hash_string = str(sample_from_database["filepath"]) + str( + instance_id + ) + hash_string = md5(hash_string.encode("utf-8")).hexdigest() + instance_filepath = ( + self.save_dir / "instances" / f"{hash_string}.npy" + ) + instance = { + "classes": instance_classes, + "instance_filepath": str(instance_filepath), + "instance_size": len(instance_points), + "original_file": str(sample_from_database["filepath"]), + } + if not instance_filepath.parent.exists(): + instance_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(instance_filepath, instance_points.astype(np.float32)) + file_instances.append(instance) + return file_instances + + def fix_bugs_in_labels(self): + pass + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/train_database.yaml", + ): + pass + + def save_database(self, database, mode): + for element in database: + self._dict_to_yaml(element) + self._save_yaml(self.save_dir / (mode + "_database.yaml"), database) + + def joint_database(self, train_modes=["train", "validation"]): + joint_db = [] + for mode in train_modes: + joint_db.extend( + self._load_yaml(self.save_dir / (mode + "_database.yaml")) + ) + self._save_yaml( + self.save_dir / "train_validation_database.yaml", joint_db + ) + + @classmethod + def _read_json(cls, 
path): + with open(path) as f: + file = json.load(f) + return file + + @classmethod + def _save_yaml(cls, path, file): + with open(path, "w") as f: + yaml.safe_dump( + file, f, default_style=None, default_flow_style=False + ) + + @classmethod + def _dict_to_yaml(cls, dictionary): + if not isinstance(dictionary, dict): + return + for k, v in dictionary.items(): + if isinstance(v, dict): + cls._dict_to_yaml(v) + if isinstance(v, np.ndarray): + dictionary[k] = v.tolist() + if isinstance(v, Path): + dictionary[k] = str(v) + + @classmethod + def _load_yaml(cls, filepath): + with open(filepath) as f: + file = yaml.safe_load(f) + return file + + +if __name__ == "__main__": + Fire(BasePreprocessing) diff --git a/models/Mask3D/mask3d/datasets/preprocessing/s3dis_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/s3dis_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7ff4967ca9dc22248c6863b41f7b652687ae98 --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/s3dis_preprocessing.py @@ -0,0 +1,282 @@ +import os +import re + +import numpy as np +from fire import Fire +from loguru import logger +from natsort import natsorted + +from datasets.preprocessing.base_preprocessing import BasePreprocessing + + +class S3DISPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/s3dis", + save_dir: str = "./data/processed/s3dis", + modes: tuple = ( + "Area_1", + "Area_2", + "Area_3", + "Area_4", + "Area_5", + "Area_6", + ), + n_jobs: int = -1, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.class_map = { + "ceiling": 0, + "floor": 1, + "wall": 2, + "beam": 3, + "column": 4, + "window": 5, + "door": 6, + "table": 7, + "chair": 8, + "sofa": 9, + "bookcase": 10, + "board": 11, + "clutter": 12, + "stairs": 12, # stairs are also mapped to clutter + } + + self.color_map = [ + [0, 255, 0], # ceiling + [0, 0, 255], # floor + [0, 255, 255], # wall + [255, 255, 0], # beam + [255, 0, 255], # column + [100, 100, 255], # window + [200, 200, 100], # door + [170, 120, 200], # table + [255, 0, 0], # chair + [200, 100, 100], # sofa + [10, 200, 100], # bookcase + [200, 200, 200], # board + [50, 50, 50], + ] # clutter + + self.create_label_database() + + for mode in self.modes: + filepaths = [] + for scene_path in [ + f.path for f in os.scandir(self.data_dir / mode) if f.is_dir() + ]: + filepaths.append(scene_path) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self): + label_database = dict() + for class_name, class_id in self.class_map.items(): + label_database[class_id] = { + "color": self.color_map[class_id], + "name": class_name, + "validation": True, + } + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def _buf_count_newlines_gen(self, fname): + def _make_gen(reader): + while True: + b = reader(2**16) + if not b: + break + yield b + + with open(fname, "rb") as f: + count = sum(buf.count(b"\n") for buf in _make_gen(f.raw.read)) + return count + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. 
+ + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + filebase = { + "filepath": filepath, + "scene": filepath.split("/")[-1], + "area": mode, + "raw_filepath": str(filepath), + "file_len": -1, + } + + scene_name = filepath.split("/")[-1] + instance_counter = 0 + scene_points = [] + for instance in [ + f + for f in os.scandir( + self.data_dir / mode / scene_name / "Annotations" + ) + if f.name.endswith(".txt") + ]: + instance_class = self.class_map[instance.name.split("_")[0]] + instance_points = np.loadtxt(instance.path) + + instance_normals = np.ones((instance_points.shape[0], 3)) + instance_class = np.array(instance_class).repeat( + instance_points.shape[0] + )[..., None] + instance_id = np.array(instance_counter).repeat( + instance_points.shape[0] + )[..., None] + + instance_points = np.hstack( + ( + instance_points, + instance_normals, + instance_class, + instance_id, + ) + ) + + scene_points.append(instance_points) + instance_counter += 1 + + points = np.vstack(scene_points) + + pcd_size = self._buf_count_newlines_gen(f"{filepath}/{scene_name}.txt") + if points.shape[0] != pcd_size: + print(f"FILE SIZE DOES NOT MATCH FOR {filepath}/{scene_name}.txt") + print(f"({points.shape[0]} vs. {pcd_size})") + + filebase["raw_segmentation_filepath"] = "" + + # add segment id as additional feature (DUMMY) + points = np.hstack((points, np.ones(points.shape[0])[..., None])) + points[:, [9, 10, -1]] = points[ + :, [-1, 9, 10] + ] # move segments after RGB + + gt_data = (points[:, -2] + 1) * 1000 + points[:, -1] + 1 + + file_len = len(points) + filebase["file_len"] = file_len + + processed_filepath = self.save_dir / mode / f"{scene_name}.npy" + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + processed_gt_filepath = ( + self.save_dir / "instance_gt" / mode / f"{scene_name}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir(parents=True, exist_ok=True) + np.savetxt(processed_gt_filepath, gt_data.astype(np.int32), fmt="%d") + filebase["instance_gt_filepath"] = str(processed_gt_filepath) + + filebase["color_mean"] = [ + float((points[:, 3] / 255).mean()), + float((points[:, 4] / 255).mean()), + float((points[:, 5] / 255).mean()), + ] + filebase["color_std"] = [ + float(((points[:, 3] / 255) ** 2).mean()), + float(((points[:, 4] / 255) ** 2).mean()), + float(((points[:, 5] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std(self, train_database_path: str = ""): + area_database_paths = [ + f + for f in os.scandir(self.save_dir) + if f.name.startswith("Area_") and f.name.endswith(".yaml") + ] + + for database_path in area_database_paths: + database = self._load_yaml(database_path.path) + color_mean, color_std = [], [] + for sample in database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt( + np.array(color_std).mean(axis=0) - color_mean**2 + ) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml( + self.save_dir / f"{database_path.name}_color_mean_std.yaml", + feats_mean_std, + ) + + for database_path in area_database_paths: + all_mean, all_std = [], [] + for let_out_path in area_database_paths: + if 
database_path == let_out_path: + continue + + database = self._load_yaml(let_out_path.path) + for sample in database: + all_std.append(sample["color_std"]) + all_mean.append(sample["color_mean"]) + + all_color_mean = np.array(all_mean).mean(axis=0) + all_color_std = np.sqrt( + np.array(all_std).mean(axis=0) - all_color_mean**2 + ) + feats_mean_std = { + "mean": [float(each) for each in all_color_mean], + "std": [float(each) for each in all_color_std], + } + file_path = database_path.name.replace("_database.yaml", "") + self._save_yaml( + self.save_dir / f"{file_path}_color_mean_std.yaml", + feats_mean_std, + ) + + @logger.catch + def fix_bugs_in_labels(self): + pass + + def joint_database( + self, + train_modes=( + "Area_1", + "Area_2", + "Area_3", + "Area_4", + "Area_5", + "Area_6", + ), + ): + for mode in train_modes: + joint_db = [] + for let_out in train_modes: + if mode == let_out: + continue + joint_db.extend( + self._load_yaml( + self.save_dir / (let_out + "_database.yaml") + ) + ) + self._save_yaml( + self.save_dir / f"train_{mode}_database.yaml", joint_db + ) + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(S3DISPreprocessing) diff --git a/models/Mask3D/mask3d/datasets/preprocessing/scannet_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/scannet_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..5a981864612e04930b04c9c0df8aaa6e2d9249a3 --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/scannet_preprocessing.py @@ -0,0 +1,296 @@ +import re +from pathlib import Path +import numpy as np +import pandas as pd +from fire import Fire +from natsort import natsorted +from loguru import logger + +from datasets.preprocessing.base_preprocessing import BasePreprocessing +from utils.point_cloud_utils import load_ply_with_normals + +from datasets.scannet200.scannet200_constants import ( + VALID_CLASS_IDS_200, + SCANNET_COLOR_MAP_200, + CLASS_LABELS_200, +) + + +class ScannetPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/scannet/scannet", + save_dir: str = "./data/processed/scannet", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + git_repo: str = "./data/raw/scannet/ScanNet", + scannet200: bool = False, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + self.scannet200 = scannet200 + + if self.scannet200: + self.labels_pd = pd.read_csv( + self.data_dir / "scannetv2-labels.combined.tsv", + sep="\t", + header=0, + ) + + git_repo = Path(git_repo) + self.create_label_database(git_repo) + for mode in self.modes: + trainval_split_dir = git_repo / "Tasks" / "Benchmark" + scannet_special_mode = "val" if mode == "validation" else mode + with open( + trainval_split_dir / (f"scannetv2_{scannet_special_mode}.txt") + ) as f: + # -1 because the last one is always empty + split_file = f.read().split("\n")[:-1] + + scans_folder = "scans_test" if mode == "test" else "scans" + filepaths = [] + for scene in split_file: + filepaths.append( + self.data_dir + / scans_folder + / scene + / (scene + "_vh_clean_2.ply") + ) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self, git_repo): + if self.scannet200: + label_database = {} + for row_id, class_id in enumerate(VALID_CLASS_IDS_200): + label_database[class_id] = { + "color": SCANNET_COLOR_MAP_200[class_id], + "name": CLASS_LABELS_200[row_id], + "validation": 
True, + } + self._save_yaml( + self.save_dir / "label_database.yaml", label_database + ) + return label_database + else: + if (self.save_dir / "label_database.yaml").exists(): + return self._load_yaml(self.save_dir / "label_database.yaml") + df = pd.read_csv( + self.data_dir / "scannetv2-labels.combined.tsv", sep="\t" + ) + df = ( + df[~df[["nyu40class", "nyu40id"]].duplicated()][ + ["nyu40class", "nyu40id"] + ] + .set_index("nyu40id") + .sort_index()[["nyu40class"]] + .rename(columns={"nyu40class": "name"}) + .replace(" ", "_", regex=True) + ) + df = pd.DataFrame([{"name": "empty"}]).append(df) + df["validation"] = False + + with open( + git_repo + / "Tasks" + / "Benchmark" + / "classes_SemVoxLabel-nyu40id.txt" + ) as f: + for_validation = f.read().split("\n") + for category in for_validation: + index = int(re.split(" +", category)[0]) + df.loc[index, "validation"] = True + + # doing this hack because otherwise I will have to install imageio + with open(git_repo / "BenchmarkScripts" / "util.py") as f: + util = f.read() + color_list = eval("[" + util.split("return [\n")[1]) + + df["color"] = color_list + + label_database = df.to_dict("index") + self._save_yaml( + self.save_dir / "label_database.yaml", label_database + ) + return label_database + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. + + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + scene, sub_scene = self._parse_scene_subscene(filepath.name) + filebase = { + "filepath": filepath, + "scene": scene, + "sub_scene": sub_scene, + "raw_filepath": str(filepath), + "file_len": -1, + } + # reading both files and checking that they are fitting + coords, features, _ = load_ply_with_normals(filepath) + file_len = len(coords) + filebase["file_len"] = file_len + points = np.hstack((coords, features)) + + if mode in ["train", "validation"]: + # getting scene information + description_filepath = Path( + filepath + ).parent / filepath.name.replace("_vh_clean_2.ply", ".txt") + with open(description_filepath) as f: + scene_type = f.read().split("\n")[:-1] + scene_type = scene_type[-1].split(" = ")[1] + filebase["scene_type"] = scene_type + filebase["raw_description_filepath"] = description_filepath + + # getting instance info + instance_info_filepath = next( + Path(filepath).parent.glob("*.aggregation.json") + ) + segment_indexes_filepath = next( + Path(filepath).parent.glob("*[0-9].segs.json") + ) + instance_db = self._read_json(instance_info_filepath) + segments = self._read_json(segment_indexes_filepath) + segments = np.array(segments["segIndices"]) + filebase["raw_instance_filepath"] = instance_info_filepath + filebase["raw_segmentation_filepath"] = segment_indexes_filepath + + # add segment id as additional feature + segment_ids = np.unique(segments, return_inverse=True)[1] + points = np.hstack((points, segment_ids[..., None])) + + # reading labels file + label_filepath = filepath.parent / filepath.name.replace( + ".ply", ".labels.ply" + ) + filebase["raw_label_filepath"] = label_filepath + label_coords, label_colors, labels = load_ply_with_normals( + label_filepath + ) + if not np.allclose(coords, label_coords): + raise ValueError("files doesn't have same coordinates") + + # adding instance label + labels = labels[:, np.newaxis] + empty_instance_label = np.full(labels.shape, -1) + labels = np.hstack((labels, empty_instance_label)) + for instance in instance_db["segGroups"]: 
+ segments_occupied = np.array(instance["segments"]) + occupied_indices = np.isin(segments, segments_occupied) + labels[occupied_indices, 1] = instance["id"] + + if self.scannet200: + label200 = instance["label"] + # Map the category name to id + label_ids = self.labels_pd[ + self.labels_pd["raw_category"] == label200 + ]["id"] + label_id = ( + int(label_ids.iloc[0]) if len(label_ids) > 0 else 0 + ) + labels[occupied_indices, 0] = label_id + points = np.hstack((points, labels)) + + # gt_data = (points[:, -2] + 1) * 1000 + points[:, -1] + 1 + gt_data = points[:, -2] * 1000 + points[:, -1] + 1 + else: + segments_test = "../../data/raw/scannet_test_segments" + segment_indexes_filepath = filepath.name.replace( + ".ply", ".0.010000.segs.json" + ) + segments = self._read_json( + f"{segments_test}/{segment_indexes_filepath}" + ) + segments = np.array(segments["segIndices"]) + # add segment id as additional feature + segment_ids = np.unique(segments, return_inverse=True)[1] + points = np.hstack((points, segment_ids[..., None])) + + processed_filepath = ( + self.save_dir / mode / f"{scene:04}_{sub_scene:02}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + if mode == "test": + return filebase + + processed_gt_filepath = ( + self.save_dir + / "instance_gt" + / mode + / f"scene{scene:04}_{sub_scene:02}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir(parents=True, exist_ok=True) + np.savetxt(processed_gt_filepath, gt_data.astype(np.int32), fmt="%d") + filebase["instance_gt_filepath"] = str(processed_gt_filepath) + + filebase["color_mean"] = [ + float((features[:, 0] / 255).mean()), + float((features[:, 1] / 255).mean()), + float((features[:, 2] / 255).mean()), + ] + filebase["color_std"] = [ + float(((features[:, 0] / 255) ** 2).mean()), + float(((features[:, 1] / 255) ** 2).mean()), + float(((features[:, 2] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/scannet/train_database.yaml", + ): + train_database = self._load_yaml(train_database_path) + color_mean, color_std = [], [] + for sample in train_database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt(np.array(color_std).mean(axis=0) - color_mean**2) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml(self.save_dir / "color_mean_std.yaml", feats_mean_std) + + @logger.catch + def fix_bugs_in_labels(self): + if not self.scannet200: + logger.add(self.save_dir / "fixed_bugs_in_labels.log") + found_wrong_labels = { + tuple([270, 0]): 50, + tuple([270, 2]): 50, + tuple([384, 0]): 149, + } + for scene, wrong_label in found_wrong_labels.items(): + scene, sub_scene = scene + bug_file = ( + self.save_dir / "train" / f"{scene:04}_{sub_scene:02}.npy" + ) + points = np.load(bug_file) + bug_mask = points[:, -1] != wrong_label + points = points[bug_mask] + np.save(bug_file, points) + logger.info(f"Fixed {bug_file}") + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(ScannetPreprocessing) diff --git 
a/models/Mask3D/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..d483e535435cca026588c3177cfe368fad99596b --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/semantic_kitti_preprocessing.py @@ -0,0 +1,181 @@ +import re +from pathlib import Path +from hashlib import md5 +from natsort import natsorted + +import numpy as np +from fire import Fire + +from base_preprocessing import BasePreprocessing + + +class SemanticKittiPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "./data/raw/semantic_kitti", + save_dir: str = "./data/processed/semantic_kitti", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + git_repo: str = "./data/raw/semantic-kitti-api", + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + git_repo = Path(git_repo) + self.create_label_database(git_repo / "config" / "semantic-kitti.yaml") + self.config = self._load_yaml( + git_repo / "config" / "semantic-kitti.yaml" + ) + self.pose = dict() + + for mode in self.modes: + scene_mode = "valid" if mode == "validation" else mode + self.pose[mode] = dict() + for scene in sorted(self.config["split"][scene_mode]): + filepaths = list( + self.data_dir.glob(f"*/{scene:02}/velodyne/*bin") + ) + filepaths = [str(file) for file in filepaths] + self.files[mode].extend(natsorted(filepaths)) + calibration = parse_calibration( + Path(filepaths[0]).parent.parent / "calib.txt" + ) + self.pose[mode].update( + { + scene: parse_poses( + Path(filepaths[0]).parent.parent / "poses.txt", + calibration, + ), + } + ) + + def create_label_database(self, config_file): + if (self.save_dir / "label_database.yaml").exists(): + return self._load_yaml(self.save_dir / "label_database.yaml") + config = self._load_yaml(config_file) + label_database = {} + for key, old_key in config["learning_map_inv"].items(): + label_database.update( + { + key: { + "name": config["labels"][old_key], + # bgr -> rgb + "color": config["color_map"][old_key][::-1], + "validation": not config["learning_ignore"][key], + } + } + ) + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def process_file(self, filepath, mode): + """process_file. 
+ + Args: + filepath: path to the main ply file + mode: train, test + + Returns: + filebase: info about file + """ + scene, sub_scene = re.search(r"(\d{2}).*(\d{6})", filepath).group(1, 2) + filebase = { + "filepath": filepath, + "scene": int(scene), + "sub_scene": int(sub_scene), + "file_len": -1, + "pose": self.pose[mode][int(scene)][int(sub_scene)].tolist(), + } + + points = np.fromfile(filepath, dtype=np.float32).reshape(-1, 4) + file_len = len(points) + filebase["file_len"] = file_len + + if mode in ["train", "validation"]: + # getting label info + label_filepath = filepath.replace("velodyne", "labels").replace( + "bin", "label" + ) + filebase["label_filepath"] = label_filepath + label = np.fromfile(label_filepath, dtype=np.uint32).astype( + np.int32 + ) + if not points.shape[0] == label.shape[0]: + raise ValueError("Files do not have same length") + semantic_label = label & 0xFFFF + instance_label = label >> 16 + + semantic_label_copy = semantic_label.copy() + for label in np.unique(semantic_label): + semantic_label[semantic_label_copy == label] = self.config[ + "learning_map" + ][label] + + label = np.hstack( + (semantic_label[:, np.newaxis], instance_label[:, np.newaxis]) + ) + points = np.hstack((points, label)) + + processed_filepath = self.save_dir / mode / f"{scene}_{sub_scene}.npy" + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + return filebase + + +def parse_calibration(filename): + """read calibration file with given filename + Returns + ------- + dict + Calibration matrices as 4x4 numpy arrays. + """ + calib = {} + + with open(filename) as calib_file: + for line in calib_file: + key, content = line.strip().split(":") + values = [float(v) for v in content.strip().split()] + + pose = np.zeros((4, 4)) + pose[0, 0:4] = values[0:4] + pose[1, 0:4] = values[4:8] + pose[2, 0:4] = values[8:12] + pose[3, 3] = 1.0 + + calib[key] = pose + return calib + + +def parse_poses(filename, calibration): + """read poses file with per-scan poses from given filename + Returns + ------- + list + list of poses as 4x4 numpy arrays. 
+ """ + + poses = [] + + Tr = calibration["Tr"] + Tr_inv = np.linalg.inv(Tr) + + with open(filename) as file: + for line in file: + values = [float(v) for v in line.strip().split()] + + pose = np.zeros((4, 4)) + pose[0, 0:4] = values[0:4] + pose[1, 0:4] = values[4:8] + pose[2, 0:4] = values[8:12] + pose[3, 3] = 1.0 + + poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr))) + + return poses + + +if __name__ == "__main__": + Fire(SemanticKittiPreprocessing) diff --git a/models/Mask3D/mask3d/datasets/preprocessing/stpls3d_preprocessing.py b/models/Mask3D/mask3d/datasets/preprocessing/stpls3d_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..63ed5bff5d52e656f4bad2f853e5973b433871bd --- /dev/null +++ b/models/Mask3D/mask3d/datasets/preprocessing/stpls3d_preprocessing.py @@ -0,0 +1,291 @@ +import re +import os +import numpy as np +from fire import Fire +from natsort import natsorted +from loguru import logger +import pandas as pd + +from datasets.preprocessing.base_preprocessing import BasePreprocessing + + +class STPLS3DPreprocessing(BasePreprocessing): + def __init__( + self, + data_dir: str = "../../data/raw/stpls3d", + save_dir: str = "../../data/processed/stpls3d", + modes: tuple = ("train", "validation", "test"), + n_jobs: int = -1, + ): + super().__init__(data_dir, save_dir, modes, n_jobs) + + # https://github.com/meidachen/STPLS3D/blob/main/HAIS/STPLS3DInstanceSegmentationChallenge_Codalab_Evaluate.py#L31 + CLASS_LABELS = [ + "Build", + "LowVeg", + "MediumVeg", + "HighVeg", + "Vehicle", + "Truck", + "Aircraft", + "MilitaryVeh", + "Bike", + "Motorcycle", + "LightPole", + "StreetSign", + "Clutter", + "Fence", + ] + VALID_CLASS_IDS = np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + ) + + self.class_map = { + "Ground": 0, + "Build": 1, + "LowVeg": 2, + "MediumVeg": 3, + "HighVeg": 4, + "Vehicle": 5, + "Truck": 6, + "Aircraft": 7, + "MilitaryVeh": 8, + "Bike": 9, + "Motorcycle": 10, + "LightPole": 11, + "StreetSign": 12, + "Clutter": 13, + "Fence": 14, + } + + self.color_map = [ + [0, 255, 0], # Ground + [0, 0, 255], # Build + [0, 255, 255], # LowVeg + [255, 255, 0], # MediumVeg + [255, 0, 255], # HiVeg + [100, 100, 255], # Vehicle + [200, 200, 100], # Truck + [170, 120, 200], # Aircraft + [255, 0, 0], # MilitaryVec + [200, 100, 100], # Bike + [10, 200, 100], # Motorcycle + [200, 200, 200], # LightPole + [50, 50, 50], # StreetSign + [60, 130, 60], # Clutter + [130, 30, 60], + ] # Fence + + self.create_label_database() + + for mode in self.modes: + filepaths = [] + for scene_path in [ + f.path for f in os.scandir(self.data_dir / mode) + ]: + filepaths.append(scene_path) + self.files[mode] = natsorted(filepaths) + + def create_label_database(self): + label_database = dict() + for class_name, class_id in self.class_map.items(): + label_database[class_id] = { + "color": self.color_map[class_id], + "name": class_name, + "validation": True, + } + + self._save_yaml(self.save_dir / "label_database.yaml", label_database) + return label_database + + def process_file(self, filepath, mode): + """process_file. + + Please note, that for obtaining segmentation labels ply files were used. 
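+
+        In this preprocessing the raw ``.txt`` point cloud is read with pandas, dummy
+        normal/segment columns are appended, coordinates are shifted into the positive
+        range, and validation/test scenes are additionally split into 50m x 50m blocks
+        (see ``splitPointCloud``), with per-block instance ground truth written out for
+        the validation split.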
+ + Args: + filepath: path to the main ply file + mode: train, test or validation + + Returns: + filebase: info about file + """ + filebase = { + "filepath": filepath, + "scene": filepath.split("/")[-1], + "raw_filepath": str(filepath), + "file_len": -1, + } + + points = pd.read_csv(filepath, header=None).values + + filebase["raw_segmentation_filepath"] = "" + + # add segment id as additional feature (DUMMY) + if mode in ["train", "validation"]: + points = np.hstack( + ( + points, + np.ones(points.shape[0])[..., None], # normal 1 + np.ones(points.shape[0])[..., None], # normal 2 + np.ones(points.shape[0])[..., None], # normal 3 + np.ones(points.shape[0])[..., None], + ) + ) # segments + else: + # we need to add dummies for semantics and instances + points = np.hstack( + ( + points, + np.ones(points.shape[0])[..., None], # semantic class + np.ones(points.shape[0])[..., None], # instance id + np.ones(points.shape[0])[..., None], # normal 1 + np.ones(points.shape[0])[..., None], # normal 2 + np.ones(points.shape[0])[..., None], # normal 3 + np.ones(points.shape[0])[..., None], + ) + ) # segments + + points = points[ + :, [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 6, 7] + ] # move segments after RGB + + # move point clouds to be in positive range (important for split pointcloud function) + points[:, :3] = points[:, :3] - points[:, :3].min(0) + + points = points.astype(np.float32) + + if mode == "test": + points = points[:, :-2] + else: + points[ + points[:, -1] == -100.0, -1 + ] = -1 # -1 indicates "no instance" + + file_len = len(points) + filebase["file_len"] = file_len + + processed_filepath = ( + self.save_dir + / mode + / f"{filebase['scene'].replace('.txt', '')}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir(parents=True, exist_ok=True) + np.save(processed_filepath, points.astype(np.float32)) + filebase["filepath"] = str(processed_filepath) + + if mode in ["validation", "test"]: + blocks = self.splitPointCloud(points) + + filebase["instance_gt_filepath"] = [] + filebase["filepath_crop"] = [] + for block_id, block in enumerate(blocks): + if len(block) > 10000: + if mode == "validation": + new_instance_ids = np.unique( + block[:, -1], return_inverse=True + )[1] + + assert new_instance_ids.shape[0] == block.shape[0] + # == 0 means -1 == no instance + # new_instance_ids[new_instance_ids == 0] + assert ( + new_instance_ids.max() < 1000 + ), "we cannot encode when there are more than 999 instances in a block" + + gt_data = (block[:, -2]) * 1000 + new_instance_ids + + processed_gt_filepath = ( + self.save_dir + / "instance_gt" + / mode + / f"{filebase['scene'].replace('.txt', '')}_{block_id}.txt" + ) + if not processed_gt_filepath.parent.exists(): + processed_gt_filepath.parent.mkdir( + parents=True, exist_ok=True + ) + np.savetxt( + processed_gt_filepath, + gt_data.astype(np.int32), + fmt="%d", + ) + filebase["instance_gt_filepath"].append( + str(processed_gt_filepath) + ) + + processed_filepath = ( + self.save_dir + / mode + / f"{filebase['scene'].replace('.txt', '')}_{block_id}.npy" + ) + if not processed_filepath.parent.exists(): + processed_filepath.parent.mkdir( + parents=True, exist_ok=True + ) + np.save(processed_filepath, block.astype(np.float32)) + filebase["filepath_crop"].append(str(processed_filepath)) + else: + print("block was smaller than 1000 points") + assert False + + filebase["color_mean"] = [ + float((points[:, 3] / 255).mean()), + float((points[:, 4] / 255).mean()), + float((points[:, 5] / 255).mean()), + ] + filebase["color_std"] = [ + 
float(((points[:, 3] / 255) ** 2).mean()), + float(((points[:, 4] / 255) ** 2).mean()), + float(((points[:, 5] / 255) ** 2).mean()), + ] + return filebase + + def compute_color_mean_std( + self, + train_database_path: str = "./data/processed/stpls3d/train_database.yaml", + ): + train_database = self._load_yaml(train_database_path) + color_mean, color_std = [], [] + for sample in train_database: + color_std.append(sample["color_std"]) + color_mean.append(sample["color_mean"]) + + color_mean = np.array(color_mean).mean(axis=0) + color_std = np.sqrt(np.array(color_std).mean(axis=0) - color_mean**2) + feats_mean_std = { + "mean": [float(each) for each in color_mean], + "std": [float(each) for each in color_std], + } + self._save_yaml(self.save_dir / "color_mean_std.yaml", feats_mean_std) + + def splitPointCloud(self, cloud, size=50.0, stride=50): + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - size) / stride)) + 1 + depth = int(np.ceil((limitMax[1] - size) / stride)) + 1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks = [] + for (x, y) in cells: + xcond = (cloud[:, 0] <= x + size) & (cloud[:, 0] >= x) + ycond = (cloud[:, 1] <= y + size) & (cloud[:, 1] >= y) + cond = xcond & ycond + block = cloud[cond, :] + blocks.append(block) + return blocks + + @logger.catch + def fix_bugs_in_labels(self): + pass + + def _parse_scene_subscene(self, name): + scene_match = re.match(r"scene(\d{4})_(\d{2})", name) + return int(scene_match.group(1)), int(scene_match.group(2)) + + +if __name__ == "__main__": + Fire(STPLS3DPreprocessing) diff --git a/models/Mask3D/mask3d/datasets/random_cuboid.py b/models/Mask3D/mask3d/datasets/random_cuboid.py new file mode 100644 index 0000000000000000000000000000000000000000..334b87ecadbd9cbee2979d462532fb4a479b280f --- /dev/null +++ b/models/Mask3D/mask3d/datasets/random_cuboid.py @@ -0,0 +1,96 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import torch + + +def check_aspect(crop_range, aspect_min): + xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2]) + xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]]) + yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:]) + return ( + (xy_aspect >= aspect_min) + or (xz_aspect >= aspect_min) + or (yz_aspect >= aspect_min) + ) + + +class RandomCuboid(object): + """ + RandomCuboid augmentation from DepthContrast [https://arxiv.org/abs/2101.02691] + We slightly modify this operation to account for object detection. + This augmentation randomly crops a cuboid from the input and + ensures that the cropped cuboid contains at least one bounding box + """ + + def __init__( + self, + min_points, + # aspect=0.8, + crop_length=6.0, + version1=True, + ): + # self.aspect = aspect + self.crop_length = crop_length + self.min_points = min_points + self.version1 = version1 + + def __call__(self, point_cloud): + if point_cloud.shape[0] < self.min_points: + print("too small pcd") + return np.ones(point_cloud.shape[0], dtype=np.bool) + + range_xyz = np.max(point_cloud[:, :2], axis=0) - np.min( + point_cloud[:, :2], axis=0 + ) + + for _ in range(100): + # crop_range = self.min_crop + np.random.rand(3) * ( + # self.max_crop - self.min_crop + # ) + # crop_range[-1] = 999. 
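+            # NOTE: the DepthContrast-style aspect-ratio check (commented out around here)
+            # is unused; this variant only jitters the crop center in XY and keeps a fixed
+            # crop_length for the cuboid.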
+ # if not check_aspect(crop_range, self.aspect): + # continue + + sample_center = point_cloud[:, :2].min(axis=0) + range_xyz / 2 + + if self.version1: + offset_x = np.random.uniform( + -range_xyz[0] / 4, range_xyz[0] / 4 + ) + offset_y = np.random.uniform( + -range_xyz[1] / 4, range_xyz[1] / 4 + ) + else: + offset_x = np.random.uniform( + -(range_xyz[0] / 2) + self.crop_length / 4, + +(range_xyz[0] / 2) - self.crop_length / 4, + ) + offset_y = np.random.uniform( + -(range_xyz[1] / 2) + self.crop_length / 4, + +(range_xyz[1] / 2) - self.crop_length / 4, + ) + + sample_center[0] = sample_center[0] + offset_x + sample_center[1] = sample_center[1] + offset_y + + min_xy = sample_center - self.crop_length / 2 + max_xy = sample_center + self.crop_length / 2 + + upper_idx = ( + np.sum((point_cloud[:, :2] <= max_xy).astype(np.int32), 1) == 2 + ) + lower_idx = ( + np.sum((point_cloud[:, :2] >= min_xy).astype(np.int32), 1) == 2 + ) + + new_pointidx = (upper_idx) & (lower_idx) + + if np.sum(new_pointidx) < self.min_points: + print("TOO SMALL") + continue + + return new_pointidx + + # fallback + print("FALLBACK") + return np.ones(point_cloud.shape[0], dtype=np.bool) diff --git a/models/Mask3D/mask3d/datasets/scannet200/__init__.py b/models/Mask3D/mask3d/datasets/scannet200/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/datasets/scannet200/scannet200_constants.py b/models/Mask3D/mask3d/datasets/scannet200/scannet200_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..1d921407068335b82ad10af912d7e9d715dbd6ca --- /dev/null +++ b/models/Mask3D/mask3d/datasets/scannet200/scannet200_constants.py @@ -0,0 +1,704 @@ +### ScanNet Benchmark constants ### +VALID_CLASS_IDS_20 = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 14, + 16, + 24, + 28, + 33, + 34, + 36, + 39, +) + +CLASS_LABELS_20 = ( + "wall", + "floor", + "cabinet", + "bed", + "chair", + "sofa", + "table", + "door", + "window", + "bookshelf", + "picture", + "counter", + "desk", + "curtain", + "refrigerator", + "shower curtain", + "toilet", + "sink", + "bathtub", + "otherfurniture", +) + +SCANNET_COLOR_MAP_20 = { + 0: (0.0, 0.0, 0.0), + 1: (174.0, 199.0, 232.0), + 2: (152.0, 223.0, 138.0), + 3: (31.0, 119.0, 180.0), + 4: (255.0, 187.0, 120.0), + 5: (188.0, 189.0, 34.0), + 6: (140.0, 86.0, 75.0), + 7: (255.0, 152.0, 150.0), + 8: (214.0, 39.0, 40.0), + 9: (197.0, 176.0, 213.0), + 10: (148.0, 103.0, 189.0), + 11: (196.0, 156.0, 148.0), + 12: (23.0, 190.0, 207.0), + 14: (247.0, 182.0, 210.0), + 15: (66.0, 188.0, 102.0), + 16: (219.0, 219.0, 141.0), + 17: (140.0, 57.0, 197.0), + 18: (202.0, 185.0, 52.0), + 19: (51.0, 176.0, 203.0), + 20: (200.0, 54.0, 131.0), + 21: (92.0, 193.0, 61.0), + 22: (78.0, 71.0, 183.0), + 23: (172.0, 114.0, 82.0), + 24: (255.0, 127.0, 14.0), + 25: (91.0, 163.0, 138.0), + 26: (153.0, 98.0, 156.0), + 27: (140.0, 153.0, 101.0), + 28: (158.0, 218.0, 229.0), + 29: (100.0, 125.0, 154.0), + 30: (178.0, 127.0, 135.0), + 32: (146.0, 111.0, 194.0), + 33: (44.0, 160.0, 44.0), + 34: (112.0, 128.0, 144.0), + 35: (96.0, 207.0, 209.0), + 36: (227.0, 119.0, 194.0), + 37: (213.0, 92.0, 176.0), + 38: (94.0, 106.0, 211.0), + 39: (82.0, 84.0, 163.0), + 40: (100.0, 85.0, 144.0), +} + +### ScanNet200 Benchmark constants ### +VALID_CLASS_IDS_200 = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, 
+ 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 121, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, + 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 221, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 286, + 300, + 304, + 312, + 323, + 325, + 331, + 342, + 356, + 370, + 392, + 395, + 399, + 408, + 417, + 488, + 540, + 562, + 570, + 572, + 581, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1174, + 1175, + 1176, + 1178, + 1179, + 1180, + 1181, + 1182, + 1183, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1190, + 1191, +) + +CLASS_LABELS_200 = ( + "wall", + "chair", + "floor", + "table", + "door", + "couch", + "cabinet", + "shelf", + "desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "bicycle", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "candle", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "guitar case", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", + "shower floor", + "water pitcher", + "mailbox", + 
"bowl", + "paper bag", + "alarm clock", + "music stand", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "cd case", + "closet rod", + "coffee kettle", + "structure", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "storage organizer", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "luggage", + "mattress", +) + +SCANNET_COLOR_MAP_200 = { + 0: (0.0, 0.0, 0.0), + 1: (174.0, 199.0, 232.0), + 2: (188.0, 189.0, 34.0), + 3: (152.0, 223.0, 138.0), + 4: (255.0, 152.0, 150.0), + 5: (214.0, 39.0, 40.0), + 6: (91.0, 135.0, 229.0), + 7: (31.0, 119.0, 180.0), + 8: (229.0, 91.0, 104.0), + 9: (247.0, 182.0, 210.0), + 10: (91.0, 229.0, 110.0), + 11: (255.0, 187.0, 120.0), + 13: (141.0, 91.0, 229.0), + 14: (112.0, 128.0, 144.0), + 15: (196.0, 156.0, 148.0), + 16: (197.0, 176.0, 213.0), + 17: (44.0, 160.0, 44.0), + 18: (148.0, 103.0, 189.0), + 19: (229.0, 91.0, 223.0), + 21: (219.0, 219.0, 141.0), + 22: (192.0, 229.0, 91.0), + 23: (88.0, 218.0, 137.0), + 24: (58.0, 98.0, 137.0), + 26: (177.0, 82.0, 239.0), + 27: (255.0, 127.0, 14.0), + 28: (237.0, 204.0, 37.0), + 29: (41.0, 206.0, 32.0), + 31: (62.0, 143.0, 148.0), + 32: (34.0, 14.0, 130.0), + 33: (143.0, 45.0, 115.0), + 34: (137.0, 63.0, 14.0), + 35: (23.0, 190.0, 207.0), + 36: (16.0, 212.0, 139.0), + 38: (90.0, 119.0, 201.0), + 39: (125.0, 30.0, 141.0), + 40: (150.0, 53.0, 56.0), + 41: (186.0, 197.0, 62.0), + 42: (227.0, 119.0, 194.0), + 44: (38.0, 100.0, 128.0), + 45: (120.0, 31.0, 243.0), + 46: (154.0, 59.0, 103.0), + 47: (169.0, 137.0, 78.0), + 48: (143.0, 245.0, 111.0), + 49: (37.0, 230.0, 205.0), + 50: (14.0, 16.0, 155.0), + 51: (196.0, 51.0, 182.0), + 52: (237.0, 80.0, 38.0), + 54: (138.0, 175.0, 62.0), + 55: (158.0, 218.0, 229.0), + 56: (38.0, 96.0, 167.0), + 57: (190.0, 77.0, 246.0), + 58: (208.0, 49.0, 84.0), + 59: (208.0, 193.0, 72.0), + 62: (55.0, 220.0, 57.0), + 63: (10.0, 125.0, 140.0), + 64: (76.0, 38.0, 202.0), + 65: (191.0, 28.0, 135.0), + 66: (211.0, 120.0, 42.0), + 67: (118.0, 174.0, 76.0), + 68: (17.0, 242.0, 171.0), + 69: (20.0, 65.0, 247.0), + 70: (208.0, 61.0, 222.0), + 71: (162.0, 62.0, 60.0), + 72: (210.0, 235.0, 62.0), + 73: (45.0, 152.0, 72.0), + 74: (35.0, 107.0, 149.0), + 75: (160.0, 89.0, 237.0), + 76: (227.0, 56.0, 125.0), + 77: (169.0, 143.0, 81.0), + 78: (42.0, 143.0, 20.0), + 79: (25.0, 160.0, 151.0), + 80: (82.0, 75.0, 227.0), + 82: (253.0, 59.0, 222.0), + 84: (240.0, 130.0, 89.0), + 86: (123.0, 172.0, 47.0), + 87: (71.0, 194.0, 133.0), + 88: (24.0, 94.0, 205.0), + 89: (134.0, 16.0, 179.0), + 90: (159.0, 32.0, 52.0), + 93: (213.0, 208.0, 88.0), + 95: (64.0, 158.0, 70.0), + 96: (18.0, 163.0, 194.0), + 97: (65.0, 29.0, 153.0), + 98: (177.0, 10.0, 109.0), + 99: (152.0, 83.0, 7.0), + 100: (83.0, 175.0, 30.0), + 101: (18.0, 199.0, 153.0), + 102: (61.0, 81.0, 208.0), + 103: (213.0, 85.0, 216.0), + 104: (170.0, 53.0, 42.0), + 105: (161.0, 192.0, 38.0), + 106: (23.0, 241.0, 91.0), + 107: (12.0, 103.0, 170.0), + 110: (151.0, 41.0, 245.0), + 112: (133.0, 51.0, 80.0), + 115: (184.0, 162.0, 91.0), + 116: (50.0, 138.0, 38.0), + 118: (31.0, 237.0, 236.0), + 120: (39.0, 19.0, 208.0), + 121: (223.0, 27.0, 180.0), + 122: (254.0, 141.0, 85.0), + 125: (97.0, 144.0, 39.0), + 128: (106.0, 231.0, 176.0), + 130: (12.0, 61.0, 162.0), + 131: 
(124.0, 66.0, 140.0), + 132: (137.0, 66.0, 73.0), + 134: (250.0, 253.0, 26.0), + 136: (55.0, 191.0, 73.0), + 138: (60.0, 126.0, 146.0), + 139: (153.0, 108.0, 234.0), + 140: (184.0, 58.0, 125.0), + 141: (135.0, 84.0, 14.0), + 145: (139.0, 248.0, 91.0), + 148: (53.0, 200.0, 172.0), + 154: (63.0, 69.0, 134.0), + 155: (190.0, 75.0, 186.0), + 156: (127.0, 63.0, 52.0), + 157: (141.0, 182.0, 25.0), + 159: (56.0, 144.0, 89.0), + 161: (64.0, 160.0, 250.0), + 163: (182.0, 86.0, 245.0), + 165: (139.0, 18.0, 53.0), + 166: (134.0, 120.0, 54.0), + 168: (49.0, 165.0, 42.0), + 169: (51.0, 128.0, 133.0), + 170: (44.0, 21.0, 163.0), + 177: (232.0, 93.0, 193.0), + 180: (176.0, 102.0, 54.0), + 185: (116.0, 217.0, 17.0), + 188: (54.0, 209.0, 150.0), + 191: (60.0, 99.0, 204.0), + 193: (129.0, 43.0, 144.0), + 195: (252.0, 100.0, 106.0), + 202: (187.0, 196.0, 73.0), + 208: (13.0, 158.0, 40.0), + 213: (52.0, 122.0, 152.0), + 214: (128.0, 76.0, 202.0), + 221: (187.0, 50.0, 115.0), + 229: (180.0, 141.0, 71.0), + 230: (77.0, 208.0, 35.0), + 232: (72.0, 183.0, 168.0), + 233: (97.0, 99.0, 203.0), + 242: (172.0, 22.0, 158.0), + 250: (155.0, 64.0, 40.0), + 261: (118.0, 159.0, 30.0), + 264: (69.0, 252.0, 148.0), + 276: (45.0, 103.0, 173.0), + 283: (111.0, 38.0, 149.0), + 286: (184.0, 9.0, 49.0), + 300: (188.0, 174.0, 67.0), + 304: (53.0, 206.0, 53.0), + 312: (97.0, 235.0, 252.0), + 323: (66.0, 32.0, 182.0), + 325: (236.0, 114.0, 195.0), + 331: (241.0, 154.0, 83.0), + 342: (133.0, 240.0, 52.0), + 356: (16.0, 205.0, 144.0), + 370: (75.0, 101.0, 198.0), + 392: (237.0, 95.0, 251.0), + 395: (191.0, 52.0, 49.0), + 399: (227.0, 254.0, 54.0), + 408: (49.0, 206.0, 87.0), + 417: (48.0, 113.0, 150.0), + 488: (125.0, 73.0, 182.0), + 540: (229.0, 32.0, 114.0), + 562: (158.0, 119.0, 28.0), + 570: (60.0, 205.0, 27.0), + 572: (18.0, 215.0, 201.0), + 581: (79.0, 76.0, 153.0), + 609: (134.0, 13.0, 116.0), + 748: (192.0, 97.0, 63.0), + 776: (108.0, 163.0, 18.0), + 1156: (95.0, 220.0, 156.0), + 1163: (98.0, 141.0, 208.0), + 1164: (144.0, 19.0, 193.0), + 1165: (166.0, 36.0, 57.0), + 1166: (212.0, 202.0, 34.0), + 1167: (23.0, 206.0, 34.0), + 1168: (91.0, 211.0, 236.0), + 1169: (79.0, 55.0, 137.0), + 1170: (182.0, 19.0, 117.0), + 1171: (134.0, 76.0, 14.0), + 1172: (87.0, 185.0, 28.0), + 1173: (82.0, 224.0, 187.0), + 1174: (92.0, 110.0, 214.0), + 1175: (168.0, 80.0, 171.0), + 1176: (197.0, 63.0, 51.0), + 1178: (175.0, 199.0, 77.0), + 1179: (62.0, 180.0, 98.0), + 1180: (8.0, 91.0, 150.0), + 1181: (77.0, 15.0, 130.0), + 1182: (154.0, 65.0, 96.0), + 1183: (197.0, 152.0, 11.0), + 1184: (59.0, 155.0, 45.0), + 1185: (12.0, 147.0, 145.0), + 1186: (54.0, 35.0, 219.0), + 1187: (210.0, 73.0, 181.0), + 1188: (221.0, 124.0, 77.0), + 1189: (149.0, 214.0, 66.0), + 1190: (72.0, 185.0, 134.0), + 1191: (42.0, 94.0, 198.0), +} + +### For instance segmentation the non-object categories ### +VALID_PANOPTIC_IDS = (1, 3) + +CLASS_LABELS_PANOPTIC = ("wall", "floor") diff --git a/models/Mask3D/mask3d/datasets/scannet200/scannet200_splits.py b/models/Mask3D/mask3d/datasets/scannet200/scannet200_splits.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5585f70319d1eb061669bd82bbf3d64d0bca7b --- /dev/null +++ b/models/Mask3D/mask3d/datasets/scannet200/scannet200_splits.py @@ -0,0 +1,625 @@ +### This file contains the HEAD - COMMON - TAIL split category ids for ScanNet 200 + +HEAD_CATS_SCANNET_200 = [ + "tv stand", + "curtain", + "blinds", + "shower curtain", + "bookshelf", + "tv", + "kitchen cabinet", + "pillow", + "lamp", + "dresser", + "monitor", 
+ "object", + "ceiling", + "board", + "stove", + "closet wall", + "couch", + "office chair", + "kitchen counter", + "shower", + "closet", + "doorframe", + "sofa chair", + "mailbox", + "nightstand", + "washing machine", + "picture", + "book", + "sink", + "recycling bin", + "table", + "backpack", + "shower wall", + "toilet", + "copier", + "counter", + "stool", + "refrigerator", + "window", + "file cabinet", + "chair", + "wall", + "plant", + "coffee table", + "stairs", + "armchair", + "cabinet", + "bathroom vanity", + "bathroom stall", + "mirror", + "blackboard", + "trash can", + "stair rail", + "box", + "towel", + "door", + "clothes", + "whiteboard", + "bed", + "floor", + "bathtub", + "desk", + "wardrobe", + "clothes dryer", + "radiator", + "shelf", +] +COMMON_CATS_SCANNET_200 = [ + "cushion", + "end table", + "dining table", + "keyboard", + "bag", + "toilet paper", + "printer", + "blanket", + "microwave", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "basket", + "fan", + "laptop", + "person", + "paper towel dispenser", + "oven", + "rack", + "piano", + "suitcase", + "rail", + "container", + "telephone", + "stand", + "light", + "laundry basket", + "pipe", + "seat", + "column", + "bicycle", + "ladder", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "machine", + "mat", + "windowsill", + "bulletin board", + "fireplace", + "mini fridge", + "water cooler", + "shower door", + "pillar", + "ledge", + "furniture", + "cart", + "decoration", + "closet door", + "vacuum cleaner", + "dish rack", + "range hood", + "projector screen", + "divider", + "bathroom counter", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "bathroom cabinet", + "structure", + "storage organizer", + "potted plant", + "mattress", +] +TAIL_CATS_SCANNET_200 = [ + "paper", + "plate", + "soap dispenser", + "bucket", + "clock", + "guitar", + "toilet paper holder", + "speaker", + "cup", + "paper towel roll", + "bar", + "toaster", + "ironing board", + "soap dish", + "toilet paper dispenser", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "paper cutter", + "tray", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "storage container", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "sign", + "projector", + "candle", + "plunger", + "stuffed animal", + "headphones", + "broom", + "guitar case", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "purse", + "vent", + "shower floor", + "water pitcher", + "bowl", + "paper bag", + "alarm clock", + "music stand", + "laundry detergent", + "dumbbell", + "tube", + "cd case", + "closet rod", + "coffee kettle", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "luggage", +] + + +### Given the different size of the official train and val sets, not all ScanNet200 categories are present in the validation set. 
+### Here we list of categories with labels and IDs present in both train and validation set, and the remaining categories those are present in train, but not in val +### We dont evaluate on unseen validation categories in this benchmark + +VALID_CLASS_IDS_200_VALIDATION = ( + "wall", + "chair", + "floor", + "table", + "door", + "couch", + "cabinet", + "shelf", + "desk", + "office chair", + "bed", + "pillow", + "sink", + "picture", + "window", + "toilet", + "bookshelf", + "monitor", + "curtain", + "book", + "armchair", + "coffee table", + "box", + "refrigerator", + "lamp", + "kitchen cabinet", + "towel", + "clothes", + "tv", + "nightstand", + "counter", + "dresser", + "stool", + "cushion", + "plant", + "ceiling", + "bathtub", + "end table", + "dining table", + "keyboard", + "bag", + "backpack", + "toilet paper", + "printer", + "tv stand", + "whiteboard", + "blanket", + "shower curtain", + "trash can", + "closet", + "stairs", + "microwave", + "stove", + "shoe", + "computer tower", + "bottle", + "bin", + "ottoman", + "bench", + "board", + "washing machine", + "mirror", + "copier", + "basket", + "sofa chair", + "file cabinet", + "fan", + "laptop", + "shower", + "paper", + "person", + "paper towel dispenser", + "oven", + "blinds", + "rack", + "plate", + "blackboard", + "piano", + "suitcase", + "rail", + "radiator", + "recycling bin", + "container", + "wardrobe", + "soap dispenser", + "telephone", + "bucket", + "clock", + "stand", + "light", + "laundry basket", + "pipe", + "clothes dryer", + "guitar", + "toilet paper holder", + "seat", + "speaker", + "column", + "ladder", + "bathroom stall", + "shower wall", + "cup", + "jacket", + "storage bin", + "coffee maker", + "dishwasher", + "paper towel roll", + "machine", + "mat", + "windowsill", + "bar", + "toaster", + "bulletin board", + "ironing board", + "fireplace", + "soap dish", + "kitchen counter", + "doorframe", + "toilet paper dispenser", + "mini fridge", + "fire extinguisher", + "ball", + "hat", + "shower curtain rod", + "water cooler", + "paper cutter", + "tray", + "shower door", + "pillar", + "ledge", + "toaster oven", + "mouse", + "toilet seat cover dispenser", + "furniture", + "cart", + "scale", + "tissue box", + "light switch", + "crate", + "power outlet", + "decoration", + "sign", + "projector", + "closet door", + "vacuum cleaner", + "plunger", + "stuffed animal", + "headphones", + "dish rack", + "broom", + "range hood", + "dustpan", + "hair dryer", + "water bottle", + "handicap bar", + "vent", + "shower floor", + "water pitcher", + "mailbox", + "bowl", + "paper bag", + "projector screen", + "divider", + "laundry detergent", + "bathroom counter", + "object", + "bathroom vanity", + "closet wall", + "laundry hamper", + "bathroom stall door", + "ceiling light", + "trash bin", + "dumbbell", + "stair rail", + "tube", + "bathroom cabinet", + "closet rod", + "coffee kettle", + "shower head", + "keyboard piano", + "case of water bottles", + "coat rack", + "folded chair", + "fire alarm", + "power strip", + "calendar", + "poster", + "potted plant", + "mattress", +) + +CLASS_LABELS_200_VALIDATION = ( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 21, + 22, + 23, + 24, + 26, + 27, + 28, + 29, + 31, + 32, + 33, + 34, + 35, + 36, + 38, + 39, + 40, + 41, + 42, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 54, + 55, + 56, + 57, + 58, + 59, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 82, + 84, + 86, + 87, + 88, + 89, + 
90, + 93, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 110, + 112, + 115, + 116, + 118, + 120, + 122, + 125, + 128, + 130, + 131, + 132, + 134, + 136, + 138, + 139, + 140, + 141, + 145, + 148, + 154, + 155, + 156, + 157, + 159, + 161, + 163, + 165, + 166, + 168, + 169, + 170, + 177, + 180, + 185, + 188, + 191, + 193, + 195, + 202, + 208, + 213, + 214, + 229, + 230, + 232, + 233, + 242, + 250, + 261, + 264, + 276, + 283, + 300, + 304, + 312, + 323, + 325, + 342, + 356, + 370, + 392, + 395, + 408, + 417, + 488, + 540, + 562, + 570, + 609, + 748, + 776, + 1156, + 1163, + 1164, + 1165, + 1166, + 1167, + 1168, + 1169, + 1170, + 1171, + 1172, + 1173, + 1175, + 1176, + 1179, + 1180, + 1181, + 1182, + 1184, + 1185, + 1186, + 1187, + 1188, + 1189, + 1191, +) + +VALID_CLASS_IDS_200_TRAIN_ONLY = ( + "bicycle", + "storage container", + "candle", + "guitar case", + "purse", + "alarm clock", + "music stand", + "cd case", + "structure", + "storage organizer", + "luggage", +) + +CLASS_LABELS_200_TRAIN_ONLY = ( + 121, + 221, + 286, + 331, + 399, + 572, + 581, + 1174, + 1178, + 1183, + 1190, +) diff --git a/models/Mask3D/mask3d/datasets/semseg.py b/models/Mask3D/mask3d/datasets/semseg.py new file mode 100644 index 0000000000000000000000000000000000000000..a848b1a20e4690971bf16790fcea00ade84441c0 --- /dev/null +++ b/models/Mask3D/mask3d/datasets/semseg.py @@ -0,0 +1,993 @@ +import logging +from itertools import product +from pathlib import Path +from random import random, sample, uniform +from typing import List, Optional, Tuple, Union +from random import choice +from copy import deepcopy +from random import randrange + + +import numpy +import torch +from datasets.random_cuboid import RandomCuboid + +import albumentations as A +import numpy as np +import scipy +import volumentations as V +import yaml + +# from yaml import CLoader as Loader +from torch.utils.data import Dataset +from datasets.scannet200.scannet200_constants import ( + SCANNET_COLOR_MAP_200, + SCANNET_COLOR_MAP_20, +) + +logger = logging.getLogger(__name__) + + +class SemanticSegmentationDataset(Dataset): + """Docstring for SemanticSegmentationDataset.""" + + def __init__( + self, + dataset_name="scannet", + data_dir: Optional[Union[str, Tuple[str]]] = "data/processed/scannet", + label_db_filepath: Optional[ + str + ] = "configs/scannet_preprocessing/label_database.yaml", + # mean std values from scannet + color_mean_std: Optional[Union[str, Tuple[Tuple[float]]]] = ( + (0.47793125906962, 0.4303257521323044, 0.3749598901421883), + (0.2834475483823543, 0.27566157565723015, 0.27018971370874995), + ), + mode: Optional[str] = "train", + add_colors: Optional[bool] = True, + add_normals: Optional[bool] = True, + add_raw_coordinates: Optional[bool] = False, + add_instance: Optional[bool] = False, + num_labels: Optional[int] = -1, + data_percent: Optional[float] = 1.0, + ignore_label: Optional[Union[int, Tuple[int]]] = 255, + volume_augmentations_path: Optional[str] = None, + image_augmentations_path: Optional[str] = None, + instance_oversampling=0, + place_around_existing=False, + max_cut_region=0, + point_per_cut=100, + flip_in_center=False, + noise_rate=0.0, + resample_points=0.0, + cache_data=False, + add_unlabeled_pc=False, + task="instance_segmentation", + cropping=False, + cropping_args=None, + is_tta=False, + crop_min_size=20000, + crop_length=6.0, + cropping_v1=True, + reps_per_epoch=1, + area=-1, + on_crops=False, + eval_inner_core=-1, + filter_out_classes=[], + label_offset=0, + add_clip=False, + 
is_elastic_distortion=True, + color_drop=0.0, + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "unknown task" + + self.add_clip = add_clip + self.dataset_name = dataset_name + self.is_elastic_distortion = is_elastic_distortion + self.color_drop = color_drop + + if self.dataset_name == "scannet": + self.color_map = SCANNET_COLOR_MAP_20 + self.color_map[255] = (255, 255, 255) + elif self.dataset_name == "stpls3d": + self.color_map = { + 0: [0, 255, 0], # Ground + 1: [0, 0, 255], # Build + 2: [0, 255, 255], # LowVeg + 3: [255, 255, 0], # MediumVeg + 4: [255, 0, 255], # HiVeg + 5: [100, 100, 255], # Vehicle + 6: [200, 200, 100], # Truck + 7: [170, 120, 200], # Aircraft + 8: [255, 0, 0], # MilitaryVec + 9: [200, 100, 100], # Bike + 10: [10, 200, 100], # Motorcycle + 11: [200, 200, 200], # LightPole + 12: [50, 50, 50], # StreetSign + 13: [60, 130, 60], # Clutter + 14: [130, 30, 60], + } # Fence + elif self.dataset_name == "scannet200": + self.color_map = SCANNET_COLOR_MAP_200 + elif self.dataset_name == "s3dis": + self.color_map = { + 0: [0, 255, 0], # ceiling + 1: [0, 0, 255], # floor + 2: [0, 255, 255], # wall + 3: [255, 255, 0], # beam + 4: [255, 0, 255], # column + 5: [100, 100, 255], # window + 6: [200, 200, 100], # door + 7: [170, 120, 200], # table + 8: [255, 0, 0], # chair + 9: [200, 100, 100], # sofa + 10: [10, 200, 100], # bookcase + 11: [200, 200, 200], # board + 12: [50, 50, 50], # clutter + } + else: + assert False, "dataset not known" + + self.task = task + + self.filter_out_classes = filter_out_classes + self.label_offset = label_offset + + self.area = area + self.eval_inner_core = eval_inner_core + + self.reps_per_epoch = reps_per_epoch + + self.cropping = cropping + self.cropping_args = cropping_args + self.is_tta = is_tta + self.on_crops = on_crops + + self.crop_min_size = crop_min_size + self.crop_length = crop_length + + self.version1 = cropping_v1 + + self.random_cuboid = RandomCuboid( + self.crop_min_size, + crop_length=self.crop_length, + version1=self.version1, + ) + + self.mode = mode + self.data_dir = data_dir + self.add_unlabeled_pc = add_unlabeled_pc + if add_unlabeled_pc: + self.other_database = self._load_yaml( + Path(data_dir).parent / "matterport" / "train_database.yaml" + ) + if type(data_dir) == str: + self.data_dir = [self.data_dir] + self.ignore_label = ignore_label + self.add_colors = add_colors + self.add_normals = add_normals + self.add_instance = add_instance + self.add_raw_coordinates = add_raw_coordinates + self.instance_oversampling = instance_oversampling + self.place_around_existing = place_around_existing + self.max_cut_region = max_cut_region + self.point_per_cut = point_per_cut + self.flip_in_center = flip_in_center + self.noise_rate = noise_rate + self.resample_points = resample_points + + # loading database files + self._data = [] + for database_path in self.data_dir: + database_path = Path(database_path) + mode = 'Validation' + if self.dataset_name != "s3dis": + if not (database_path / f"{mode}_database.yaml").exists(): + print( + f"generate {database_path}/{mode}_database.yaml first" + ) + exit() + self._data.extend( + self._load_yaml(database_path / f"{mode}_database.yaml") + ) + else: + # mode_s3dis = f"Area_{self.area}" + mode_s3dis = "Validation" + if self.mode == "train": + mode_s3dis = "train_" + mode_s3dis + if not ( + database_path / f"{mode_s3dis}_database.yaml" + ).exists(): + print( + f"generate {database_path}/{mode_s3dis}_database.yaml first" + ) + exit() + self._data.extend( + self._load_yaml( + 
database_path / f"{mode_s3dis}_database.yaml" + ) + ) + if data_percent < 1.0: + self._data = sample( + self._data, int(len(self._data) * data_percent) + ) + # labels = self._load_yaml(Path(label_db_filepath)) + + # if working only on classes for validation - discard others + # self._labels = self._select_correct_labels(labels, num_labels) + + if instance_oversampling > 0: + self.instance_data = self._load_yaml( + Path(label_db_filepath).parent / "instance_database.yaml" + ) + + # normalize color channels + if self.dataset_name == "s3dis": + color_mean_std = color_mean_std.replace( + "color_mean_std.yaml", f"Area_{self.area}_color_mean_std.yaml" + ) + + if Path(str(color_mean_std)).exists(): + color_mean_std = self._load_yaml(color_mean_std) + color_mean, color_std = ( + tuple(color_mean_std["mean"]), + tuple(color_mean_std["std"]), + ) + elif len(color_mean_std[0]) == 3 and len(color_mean_std[1]) == 3: + color_mean, color_std = color_mean_std[0], color_mean_std[1] + else: + logger.error( + "pass mean and std as tuple of tuples, or as an .yaml file" + ) + + # augmentations + self.volume_augmentations = V.NoOp() + if (volume_augmentations_path is not None) and ( + volume_augmentations_path != "none" + ): + self.volume_augmentations = V.load( + Path(volume_augmentations_path), data_format="yaml" + ) + self.image_augmentations = A.NoOp() + if (image_augmentations_path is not None) and ( + image_augmentations_path != "none" + ): + self.image_augmentations = A.load( + Path(image_augmentations_path), data_format="yaml" + ) + # mandatory color augmentation + if add_colors: + self.normalize_color = A.Normalize(mean=color_mean, std=color_std) + + self.cache_data = cache_data + # new_data = [] + if self.cache_data: + new_data = [] + for i in range(len(self._data)): + self._data[i]["data"] = np.load( + self.data[i]["filepath"].replace("../../", "") + ) + if self.on_crops: + if self.eval_inner_core == -1: + for block_id, block in enumerate( + self.splitPointCloud(self._data[i]["data"]) + ): + if len(block) > 10000: + new_data.append( + { + "instance_gt_filepath": self._data[i][ + "instance_gt_filepath" + ][block_id] + if len( + self._data[i][ + "instance_gt_filepath" + ] + ) + > 0 + else list(), + "scene": f"{self._data[i]['scene'].replace('.txt', '')}_{block_id}.txt", + "raw_filepath": f"{self.data[i]['filepath'].replace('.npy', '')}_{block_id}", + "data": block, + } + ) + else: + assert False + else: + conds_inner, blocks_outer = self.splitPointCloud( + self._data[i]["data"], + size=self.crop_length, + inner_core=self.eval_inner_core, + ) + + for block_id in range(len(conds_inner)): + cond_inner = conds_inner[block_id] + block_outer = blocks_outer[block_id] + + if cond_inner.sum() > 10000: + new_data.append( + { + "instance_gt_filepath": self._data[i][ + "instance_gt_filepath" + ][block_id] + if len( + self._data[i][ + "instance_gt_filepath" + ] + ) + > 0 + else list(), + "scene": f"{self._data[i]['scene'].replace('.txt', '')}_{block_id}.txt", + "raw_filepath": f"{self.data[i]['filepath'].replace('.npy', '')}_{block_id}", + "data": block_outer, + "cond_inner": cond_inner, + } + ) + else: + assert False + + if self.on_crops: + self._data = new_data + # new_data.append(np.load(self.data[i]["filepath"].replace("../../", ""))) + # self._data = new_data + + def splitPointCloud(self, cloud, size=50.0, stride=50, inner_core=-1): + if inner_core == -1: + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - size) / stride)) + 1 + depth = int(np.ceil((limitMax[1] - size) / stride)) + 
1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks = [] + for (x, y) in cells: + xcond = (cloud[:, 0] <= x + size) & (cloud[:, 0] >= x) + ycond = (cloud[:, 1] <= y + size) & (cloud[:, 1] >= y) + cond = xcond & ycond + block = cloud[cond, :] + blocks.append(block) + return blocks + else: + limitMax = np.amax(cloud[:, 0:3], axis=0) + width = int(np.ceil((limitMax[0] - inner_core) / stride)) + 1 + depth = int(np.ceil((limitMax[1] - inner_core) / stride)) + 1 + cells = [ + (x * stride, y * stride) + for x in range(width) + for y in range(depth) + ] + blocks_outer = [] + conds_inner = [] + for (x, y) in cells: + xcond_outer = ( + cloud[:, 0] <= x + inner_core / 2.0 + size / 2 + ) & (cloud[:, 0] >= x + inner_core / 2.0 - size / 2) + ycond_outer = ( + cloud[:, 1] <= y + inner_core / 2.0 + size / 2 + ) & (cloud[:, 1] >= y + inner_core / 2.0 - size / 2) + + cond_outer = xcond_outer & ycond_outer + block_outer = cloud[cond_outer, :] + + xcond_inner = (block_outer[:, 0] <= x + inner_core) & ( + block_outer[:, 0] >= x + ) + ycond_inner = (block_outer[:, 1] <= y + inner_core) & ( + block_outer[:, 1] >= y + ) + + cond_inner = xcond_inner & ycond_inner + + conds_inner.append(cond_inner) + blocks_outer.append(block_outer) + return conds_inner, blocks_outer + + def map2color(self, labels): + output_colors = list() + + for label in labels: + output_colors.append(self.color_map[label]) + + return torch.tensor(output_colors) + + def __len__(self): + if self.is_tta: + return 5 * len(self.data) + else: + return self.reps_per_epoch * len(self.data) + + def __getitem__(self, idx: int): + idx = idx % len(self.data) + if self.is_tta: + idx = idx % len(self.data) + + if self.cache_data: + points = self.data[idx]["data"] + else: + assert not self.on_crops, "you need caching if on crops" + points = np.load(self.data[idx]["filepath"].replace("../../", "")) + + if "train" in self.mode and self.dataset_name in ["s3dis", "stpls3d"]: + inds = self.random_cuboid(points) + points = points[inds] + + coordinates, color, normals, segments, labels = ( + points[:, :3], + points[:, 3:6], + points[:, 6:9], + points[:, 9], + points[:, 10:12], + ) + + raw_coordinates = coordinates.copy() + raw_color = color + raw_normals = normals + + if not self.add_colors: + color = np.ones((len(color), 3)) + + # volume and image augmentations for train + if "train" in self.mode or self.is_tta: + if self.cropping: + new_idx = self.random_cuboid( + coordinates, + labels[:, 1], + self._remap_from_zero(labels[:, 0].copy()), + ) + + coordinates = coordinates[new_idx] + color = color[new_idx] + labels = labels[new_idx] + segments = segments[new_idx] + raw_color = raw_color[new_idx] + raw_normals = raw_normals[new_idx] + normals = normals[new_idx] + points = points[new_idx] + + coordinates -= coordinates.mean(0) + + try: + coordinates += ( + np.random.uniform(coordinates.min(0), coordinates.max(0)) + / 2 + ) + except OverflowError as err: + print(coordinates) + print(coordinates.shape) + raise err + + if self.instance_oversampling > 0.0: + ( + coordinates, + color, + normals, + labels, + ) = self.augment_individual_instance( + coordinates, + color, + normals, + labels, + self.instance_oversampling, + ) + + if self.flip_in_center: + coordinates = flip_in_center(coordinates) + + for i in (0, 1): + if random() < 0.5: + coord_max = np.max(points[:, i]) + coordinates[:, i] = coord_max - coordinates[:, i] + + if random() < 0.95: + if self.is_elastic_distortion: + for granularity, magnitude in ((0.2, 0.4), 
(0.8, 1.6)): + coordinates = elastic_distortion( + coordinates, granularity, magnitude + ) + aug = self.volume_augmentations( + points=coordinates, + normals=normals, + features=color, + labels=labels, + ) + coordinates, color, normals, labels = ( + aug["points"], + aug["features"], + aug["normals"], + aug["labels"], + ) + pseudo_image = color.astype(np.uint8)[np.newaxis, :, :] + color = np.squeeze( + self.image_augmentations(image=pseudo_image)["image"] + ) + + if self.point_per_cut != 0: + number_of_cuts = int(len(coordinates) / self.point_per_cut) + for _ in range(number_of_cuts): + size_of_cut = np.random.uniform(0.05, self.max_cut_region) + # not wall, floor or empty + point = choice(coordinates) + x_min = point[0] - size_of_cut + x_max = x_min + size_of_cut + y_min = point[1] - size_of_cut + y_max = y_min + size_of_cut + z_min = point[2] - size_of_cut + z_max = z_min + size_of_cut + indexes = crop( + coordinates, x_min, y_min, z_min, x_max, y_max, z_max + ) + coordinates, normals, color, labels = ( + coordinates[~indexes], + normals[~indexes], + color[~indexes], + labels[~indexes], + ) + + # if self.noise_rate > 0: + # coordinates, color, normals, labels = random_points( + # coordinates, + # color, + # normals, + # labels, + # self.noise_rate, + # self.ignore_label, + # ) + + if (self.resample_points > 0) or (self.noise_rate > 0): + coordinates, color, normals, labels = random_around_points( + coordinates, + color, + normals, + labels, + self.resample_points, + self.noise_rate, + self.ignore_label, + ) + + if self.add_unlabeled_pc: + if random() < 0.8: + new_points = np.load( + self.other_database[ + np.random.randint(0, len(self.other_database) - 1) + ]["filepath"] + ) + ( + unlabeled_coords, + unlabeled_color, + unlabeled_normals, + unlabeled_labels, + ) = ( + new_points[:, :3], + new_points[:, 3:6], + new_points[:, 6:9], + new_points[:, 9:], + ) + unlabeled_coords -= unlabeled_coords.mean(0) + unlabeled_coords += ( + np.random.uniform( + unlabeled_coords.min(0), unlabeled_coords.max(0) + ) + / 2 + ) + + aug = self.volume_augmentations( + points=unlabeled_coords, + normals=unlabeled_normals, + features=unlabeled_color, + labels=unlabeled_labels, + ) + ( + unlabeled_coords, + unlabeled_color, + unlabeled_normals, + unlabeled_labels, + ) = ( + aug["points"], + aug["features"], + aug["normals"], + aug["labels"], + ) + pseudo_image = unlabeled_color.astype(np.uint8)[ + np.newaxis, :, : + ] + unlabeled_color = np.squeeze( + self.image_augmentations(image=pseudo_image)["image"] + ) + + coordinates = np.concatenate( + (coordinates, unlabeled_coords) + ) + color = np.concatenate((color, unlabeled_color)) + normals = np.concatenate((normals, unlabeled_normals)) + labels = np.concatenate( + ( + labels, + np.full_like(unlabeled_labels, self.ignore_label), + ) + ) + + if random() < self.color_drop: + color[:] = 255 + + # normalize color information + pseudo_image = color.astype(np.uint8)[np.newaxis, :, :] + color = np.squeeze(self.normalize_color(image=pseudo_image)["image"]) + + # prepare labels and map from 0 to 20(40) + labels = labels.astype(np.int32) + # if labels.size > 0: + # labels[:, 0] = self._remap_from_zero(labels[:, 0]) + # if not self.add_instance: + # # taking only first column, which is segmentation label, not instance + # labels = labels[:, 0].flatten()[..., None] + + labels = np.hstack((labels, segments[..., None].astype(np.int32))) + + features = color + if self.add_normals: + features = np.hstack((features, normals)) + if self.add_raw_coordinates: + if len(features.shape) 
== 1: + features = np.hstack((features[None, ...], coordinates)) + else: + features = np.hstack((features, coordinates)) + + # if self.task != "semantic_segmentation": + if self.data[idx]["raw_filepath"].split("/")[-2] in [ + "scene0636_00", + "scene0154_00", + ]: + return self.__getitem__(0) + + if self.dataset_name == "s3dis": + return ( + coordinates, + features, + labels, + self.data[idx]["area"] + "_" + self.data[idx]["scene"], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + if self.dataset_name == "stpls3d": + if labels.shape[1] != 1: # only segments --> test set! + if np.unique(labels[:, -2]).shape[0] < 2: + print("NO INSTANCES") + return self.__getitem__(0) + return ( + coordinates, + features, + labels, + self.data[idx]["scene"], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + else: + return ( + coordinates, + features, + labels, + self.data[idx]["raw_filepath"].split("/")[-2], + raw_color, + raw_normals, + raw_coordinates, + idx, + ) + + @property + def data(self): + """database file containing information about preproscessed dataset""" + return self._data + + @property + def label_info(self): + """database file containing information labels used by dataset""" + return self._labels + + @staticmethod + def _load_yaml(filepath): + with open(filepath) as f: + # file = yaml.load(f, Loader=Loader) + file = yaml.load(f) + return file + + def _select_correct_labels(self, labels, num_labels): + number_of_validation_labels = 0 + number_of_all_labels = 0 + for ( + k, + v, + ) in labels.items(): + number_of_all_labels += 1 + if v["validation"]: + number_of_validation_labels += 1 + + if num_labels == number_of_all_labels: + return labels + elif num_labels == number_of_validation_labels: + valid_labels = dict() + for ( + k, + v, + ) in labels.items(): + if v["validation"]: + valid_labels.update({k: v}) + return valid_labels + else: + msg = f"""not available number labels, select from: + {number_of_validation_labels}, {number_of_all_labels}""" + raise ValueError(msg) + + def _remap_from_zero(self, labels): + labels[ + ~np.isin(labels, list(self.label_info.keys())) + ] = self.ignore_label + # remap to the range from 0 + for i, k in enumerate(self.label_info.keys()): + labels[labels == k] = i + return labels + + def _remap_model_output(self, output): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(self.label_info.keys()): + output_remapped[output == i] = k + return output_remapped + + def augment_individual_instance( + self, coordinates, color, normals, labels, oversampling=1.0 + ): + max_instance = int(len(np.unique(labels[:, 1]))) + # randomly selecting half of non-zero instances + for instance in range(0, int(max_instance * oversampling)): + if self.place_around_existing: + center = choice( + coordinates[ + labels[:, 1] == choice(np.unique(labels[:, 1])) + ] + ) + else: + center = np.array( + [uniform(-5, 5), uniform(-5, 5), uniform(-0.5, 2)] + ) + instance = choice(choice(self.instance_data)) + instance = np.load(instance["instance_filepath"]) + # centering two objects + instance[:, :3] = ( + instance[:, :3] - instance[:, :3].mean(axis=0) + center + ) + max_instance = max_instance + 1 + instance[:, -1] = max_instance + aug = V.Compose( + [ + V.Scale3d(), + V.RotateAroundAxis3d( + rotation_limit=np.pi / 24, axis=(1, 0, 0) + ), + V.RotateAroundAxis3d( + rotation_limit=np.pi / 24, axis=(0, 1, 0) + ), + V.RotateAroundAxis3d(rotation_limit=np.pi, axis=(0, 0, 1)), + ] + )( + points=instance[:, :3], + features=instance[:, 3:6], + 
normals=instance[:, 6:9], + labels=instance[:, 9:], + ) + coordinates = np.concatenate((coordinates, aug["points"])) + color = np.concatenate((color, aug["features"])) + normals = np.concatenate((normals, aug["normals"])) + labels = np.concatenate((labels, aug["labels"])) + + return coordinates, color, normals, labels + + +def elastic_distortion(pointcloud, granularity, magnitude): + """Apply elastic distortion on sparse coordinate space. + + pointcloud: numpy array of (number of points, at least 3 spatial dims) + granularity: size of the noise grid (in same scale[m/cm] as the voxel grid) + magnitude: noise multiplier + """ + blurx = np.ones((3, 1, 1, 1)).astype("float32") / 3 + blury = np.ones((1, 3, 1, 1)).astype("float32") / 3 + blurz = np.ones((1, 1, 3, 1)).astype("float32") / 3 + coords = pointcloud[:, :3] + coords_min = coords.min(0) + + # Create Gaussian noise tensor of the size given by granularity. + noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3 + noise = np.random.randn(*noise_dim, 3).astype(np.float32) + + # Smoothing. + for _ in range(2): + noise = scipy.ndimage.filters.convolve( + noise, blurx, mode="constant", cval=0 + ) + noise = scipy.ndimage.filters.convolve( + noise, blury, mode="constant", cval=0 + ) + noise = scipy.ndimage.filters.convolve( + noise, blurz, mode="constant", cval=0 + ) + + # Trilinear interpolate noise filters for each spatial dimensions. + ax = [ + np.linspace(d_min, d_max, d) + for d_min, d_max, d in zip( + coords_min - granularity, + coords_min + granularity * (noise_dim - 2), + noise_dim, + ) + ] + interp = scipy.interpolate.RegularGridInterpolator( + ax, noise, bounds_error=0, fill_value=0 + ) + pointcloud[:, :3] = coords + interp(coords) * magnitude + return pointcloud + + +def crop(points, x_min, y_min, z_min, x_max, y_max, z_max): + if x_max <= x_min or y_max <= y_min or z_max <= z_min: + raise ValueError( + "We should have x_min < x_max and y_min < y_max and z_min < z_max. 
But we got" + " (x_min = {x_min}, y_min = {y_min}, z_min = {z_min}," + " x_max = {x_max}, y_max = {y_max}, z_max = {z_max})".format( + x_min=x_min, + x_max=x_max, + y_min=y_min, + y_max=y_max, + z_min=z_min, + z_max=z_max, + ) + ) + inds = np.all( + [ + (points[:, 0] >= x_min), + (points[:, 0] < x_max), + (points[:, 1] >= y_min), + (points[:, 1] < y_max), + (points[:, 2] >= z_min), + (points[:, 2] < z_max), + ], + axis=0, + ) + return inds + + +def flip_in_center(coordinates): + # moving coordinates to center + coordinates -= coordinates.mean(0) + aug = V.Compose( + [ + V.Flip3d(axis=(0, 1, 0), always_apply=True), + V.Flip3d(axis=(1, 0, 0), always_apply=True), + ] + ) + + first_crop = coordinates[:, 0] > 0 + first_crop &= coordinates[:, 1] > 0 + # x -y + second_crop = coordinates[:, 0] > 0 + second_crop &= coordinates[:, 1] < 0 + # -x y + third_crop = coordinates[:, 0] < 0 + third_crop &= coordinates[:, 1] > 0 + # -x -y + fourth_crop = coordinates[:, 0] < 0 + fourth_crop &= coordinates[:, 1] < 0 + + if first_crop.size > 1: + coordinates[first_crop] = aug(points=coordinates[first_crop])["points"] + if second_crop.size > 1: + minimum = coordinates[second_crop].min(0) + minimum[2] = 0 + minimum[0] = 0 + coordinates[second_crop] = aug(points=coordinates[second_crop])[ + "points" + ] + coordinates[second_crop] += minimum + if third_crop.size > 1: + minimum = coordinates[third_crop].min(0) + minimum[2] = 0 + minimum[1] = 0 + coordinates[third_crop] = aug(points=coordinates[third_crop])["points"] + coordinates[third_crop] += minimum + if fourth_crop.size > 1: + minimum = coordinates[fourth_crop].min(0) + minimum[2] = 0 + coordinates[fourth_crop] = aug(points=coordinates[fourth_crop])[ + "points" + ] + coordinates[fourth_crop] += minimum + + return coordinates + + +def random_around_points( + coordinates, + color, + normals, + labels, + rate=0.2, + noise_rate=0, + ignore_label=255, +): + coord_indexes = sample( + list(range(len(coordinates))), k=int(len(coordinates) * rate) + ) + noisy_coordinates = deepcopy(coordinates[coord_indexes]) + noisy_coordinates += np.random.uniform( + -0.2 - noise_rate, 0.2 + noise_rate, size=noisy_coordinates.shape + ) + + if noise_rate > 0: + noisy_color = np.random.randint(0, 255, size=noisy_coordinates.shape) + noisy_normals = np.random.rand(*noisy_coordinates.shape) * 2 - 1 + noisy_labels = np.full(labels[coord_indexes].shape, ignore_label) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + else: + noisy_color = deepcopy(color[coord_indexes]) + noisy_normals = deepcopy(normals[coord_indexes]) + noisy_labels = deepcopy(labels[coord_indexes]) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + + return coordinates, color, normals, labels + + +def random_points( + coordinates, color, normals, labels, noise_rate=0.6, ignore_label=255 +): + max_boundary = coordinates.max(0) + 0.1 + min_boundary = coordinates.min(0) - 0.1 + + noisy_coordinates = int( + (max(max_boundary) - min(min_boundary)) / noise_rate + ) + + noisy_coordinates = np.array( + list( + product( + np.linspace( + min_boundary[0], max_boundary[0], noisy_coordinates + ), + np.linspace( + min_boundary[1], max_boundary[1], noisy_coordinates + ), + np.linspace( + min_boundary[2], max_boundary[2], noisy_coordinates + ), + ) 
+ ) + ) + noisy_coordinates += np.random.uniform( + -noise_rate, noise_rate, size=noisy_coordinates.shape + ) + + noisy_color = np.random.randint(0, 255, size=noisy_coordinates.shape) + noisy_normals = np.random.rand(*noisy_coordinates.shape) * 2 - 1 + noisy_labels = np.full( + (noisy_coordinates.shape[0], labels.shape[1]), ignore_label + ) + + coordinates = np.vstack((coordinates, noisy_coordinates)) + color = np.vstack((color, noisy_color)) + normals = np.vstack((normals, noisy_normals)) + labels = np.vstack((labels, noisy_labels)) + return coordinates, color, normals, labels diff --git a/models/Mask3D/mask3d/datasets/utils.py b/models/Mask3D/mask3d/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c91fb68ed0058a264ce76f97a618bca6e7d35a70 --- /dev/null +++ b/models/Mask3D/mask3d/datasets/utils.py @@ -0,0 +1,639 @@ +import MinkowskiEngine as ME +import numpy as np +import torch +from random import random + + +class VoxelizeCollate: + def __init__( + self, + ignore_label=255, + voxel_size=1, + mode="test", + small_crops=False, + very_small_crops=False, + batch_instance=False, + probing=False, + task="instance_segmentation", + ignore_class_threshold=100, + filter_out_classes=[], + label_offset=0, + num_queries=None, + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "task not known" + self.task = task + self.filter_out_classes = filter_out_classes + self.label_offset = label_offset + self.voxel_size = voxel_size + self.ignore_label = ignore_label + self.mode = mode + self.batch_instance = batch_instance + self.small_crops = small_crops + self.very_small_crops = very_small_crops + self.probing = probing + self.ignore_class_threshold = ignore_class_threshold + + self.num_queries = num_queries + + def __call__(self, batch): + if ("train" in self.mode) and ( + self.small_crops or self.very_small_crops + ): + batch = make_crops(batch) + if ("train" in self.mode) and self.very_small_crops: + batch = make_crops(batch) + return voxelize( + batch, + self.ignore_label, + self.voxel_size, + self.probing, + self.mode, + task=self.task, + ignore_class_threshold=self.ignore_class_threshold, + filter_out_classes=self.filter_out_classes, + label_offset=self.label_offset, + num_queries=self.num_queries, + ) + + +class VoxelizeCollateMerge: + def __init__( + self, + ignore_label=255, + voxel_size=1, + mode="test", + scenes=2, + small_crops=False, + very_small_crops=False, + batch_instance=False, + make_one_pc_noise=False, + place_nearby=False, + place_far=False, + proba=1, + probing=False, + task="instance_segmentation", + ): + assert task in [ + "instance_segmentation", + "semantic_segmentation", + ], "task not known" + self.task = task + self.mode = mode + self.scenes = scenes + self.small_crops = small_crops + self.very_small_crops = very_small_crops + self.ignore_label = ignore_label + self.voxel_size = voxel_size + self.batch_instance = batch_instance + self.make_one_pc_noise = make_one_pc_noise + self.place_nearby = place_nearby + self.place_far = place_far + self.proba = proba + self.probing = probing + + def __call__(self, batch): + if ( + ("train" in self.mode) + and (not self.make_one_pc_noise) + and (self.proba > random()) + ): + if self.small_crops or self.very_small_crops: + batch = make_crops(batch) + if self.very_small_crops: + batch = make_crops(batch) + if self.batch_instance: + batch = batch_instances(batch) + new_batch = [] + for i in range(0, len(batch), self.scenes): + batch_coordinates = [] + batch_features = [] + 
batch_labels = [] + + batch_filenames = "" + batch_raw_color = [] + batch_raw_normals = [] + + offset_instance_id = 0 + offset_segment_id = 0 + + for j in range(min(len(batch[i:]), self.scenes)): + batch_coordinates.append(batch[i + j][0]) + batch_features.append(batch[i + j][1]) + + if j == 0: + batch_filenames = batch[i + j][3] + else: + batch_filenames = ( + batch_filenames + f"+{batch[i + j][3]}" + ) + + batch_raw_color.append(batch[i + j][4]) + batch_raw_normals.append(batch[i + j][5]) + + # make instance ids and segment ids unique + # take care that -1 instances stay at -1 + batch_labels.append( + batch[i + j][2] + + [0, offset_instance_id, offset_segment_id] + ) + batch_labels[-1][batch[i + j][2][:, 1] == -1, 1] = -1 + + max_instance_id, max_segment_id = batch[i + j][2].max( + axis=0 + )[1:] + offset_segment_id = offset_segment_id + max_segment_id + 1 + offset_instance_id = ( + offset_instance_id + max_instance_id + 1 + ) + + if (len(batch_coordinates) == 2) and self.place_nearby: + border = batch_coordinates[0][:, 0].max() + border -= batch_coordinates[1][:, 0].min() + batch_coordinates[1][:, 0] += border + elif (len(batch_coordinates) == 2) and self.place_far: + batch_coordinates[1] += ( + np.random.uniform((-10, -10, -10), (10, 10, 10)) * 200 + ) + new_batch.append( + ( + np.vstack(batch_coordinates), + np.vstack(batch_features), + np.concatenate(batch_labels), + batch_filenames, + np.vstack(batch_raw_color), + np.vstack(batch_raw_normals), + ) + ) + # TODO WHAT ABOUT POINT2SEGMENT AND SO ON ... + batch = new_batch + elif ("train" in self.mode) and self.make_one_pc_noise: + new_batch = [] + for i in range(0, len(batch), 2): + if (i + 1) < len(batch): + new_batch.append( + [ + np.vstack((batch[i][0], batch[i + 1][0])), + np.vstack((batch[i][1], batch[i + 1][1])), + np.concatenate( + ( + batch[i][2], + np.full_like( + batch[i + 1][2], self.ignore_label + ), + ) + ), + ] + ) + new_batch.append( + [ + np.vstack((batch[i][0], batch[i + 1][0])), + np.vstack((batch[i][1], batch[i + 1][1])), + np.concatenate( + ( + np.full_like( + batch[i][2], self.ignore_label + ), + batch[i + 1][2], + ) + ), + ] + ) + else: + new_batch.append([batch[i][0], batch[i][1], batch[i][2]]) + batch = new_batch + # return voxelize(batch, self.ignore_label, self.voxel_size, self.probing, self.mode) + return voxelize( + batch, + self.ignore_label, + self.voxel_size, + self.probing, + self.mode, + task=self.task, + ) + + +def batch_instances(batch): + new_batch = [] + for sample in batch: + for instance_id in np.unique(sample[2][:, 1]): + new_batch.append( + ( + sample[0][sample[2][:, 1] == instance_id], + sample[1][sample[2][:, 1] == instance_id], + sample[2][sample[2][:, 1] == instance_id][:, 0], + ), + ) + return new_batch + + +def voxelize( + batch, + ignore_label, + voxel_size, + probing, + mode, + task, + ignore_class_threshold, + filter_out_classes, + label_offset, + num_queries, +): + ( + coordinates, + features, + labels, + original_labels, + inverse_maps, + original_colors, + original_normals, + original_coordinates, + idx, + ) = ([], [], [], [], [], [], [], [], []) + voxelization_dict = { + "ignore_label": ignore_label, + # "quantization_size": self.voxel_size, + "return_index": True, + "return_inverse": True, + } + + full_res_coords = [] + + for sample in batch: + idx.append(sample[7]) + original_coordinates.append(sample[6]) + original_labels.append(sample[2]) + full_res_coords.append(sample[0]) + original_colors.append(sample[4]) + original_normals.append(sample[5]) + + coords = np.floor(sample[0] / 
voxel_size) + voxelization_dict.update( + { + "coordinates": torch.from_numpy(coords).to("cpu").contiguous(), + "features": sample[1], + } + ) + + # maybe this change (_, _, ...) is not necessary and we can directly get out + # the sample coordinates? + _, _, unique_map, inverse_map = ME.utils.sparse_quantize( + **voxelization_dict + ) + inverse_maps.append(inverse_map) + + sample_coordinates = coords[unique_map] + coordinates.append(torch.from_numpy(sample_coordinates).int()) + sample_features = sample[1][unique_map] + features.append(torch.from_numpy(sample_features).float()) + if len(sample[2]) > 0: + sample_labels = sample[2][unique_map] + labels.append(torch.from_numpy(sample_labels).long()) + + # Concatenate all lists + input_dict = {"coords": coordinates, "feats": features} + if len(labels) > 0: + input_dict["labels"] = labels + coordinates, features, labels = ME.utils.sparse_collate(**input_dict) + else: + coordinates, features = ME.utils.sparse_collate(**input_dict) + labels = torch.Tensor([]) + + if probing: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + ), + labels, + ) + + if mode == "test": + for i in range(len(input_dict["labels"])): + _, ret_index, ret_inv = np.unique( + input_dict["labels"][i][:, 0], + return_index=True, + return_inverse=True, + ) + input_dict["labels"][i][:, 0] = torch.from_numpy(ret_inv) + # input_dict["segment2label"].append(input_dict["labels"][i][ret_index][:, :-1]) + else: + input_dict["segment2label"] = [] + + if "labels" in input_dict: + for i in range(len(input_dict["labels"])): + # TODO BIGGER CHANGE CHECK!!! + _, ret_index, ret_inv = np.unique( + input_dict["labels"][i][:, -1], + return_index=True, + return_inverse=True, + ) + input_dict["labels"][i][:, -1] = torch.from_numpy(ret_inv) + input_dict["segment2label"].append( + input_dict["labels"][i][ret_index][:, :-1] + ) + + if "labels" in input_dict: + list_labels = input_dict["labels"] + + target = [] + target_full = [] + + if len(list_labels[0].shape) == 1: + for batch_id in range(len(list_labels)): + label_ids = list_labels[batch_id].unique() + if 255 in label_ids: + label_ids = label_ids[:-1] + + target.append( + { + "labels": label_ids, + "masks": list_labels[batch_id] + == label_ids.unsqueeze(1), + } + ) + else: + if mode == "test": + for i in range(len(input_dict["labels"])): + target.append( + {"point2segment": input_dict["labels"][i][:, 0]} + ) + target_full.append( + { + "point2segment": torch.from_numpy( + original_labels[i][:, 0] + ).long() + } + ) + else: + target = get_instance_masks( + list_labels, + list_segments=input_dict["segment2label"], + task=task, + ignore_class_threshold=ignore_class_threshold, + filter_out_classes=filter_out_classes, + label_offset=label_offset, + ) + for i in range(len(target)): + target[i]["point2segment"] = input_dict["labels"][i][:, 2] + if "train" not in mode: + target_full = get_instance_masks( + [torch.from_numpy(l) for l in original_labels], + task=task, + ignore_class_threshold=ignore_class_threshold, + filter_out_classes=filter_out_classes, + label_offset=label_offset, + ) + for i in range(len(target_full)): + target_full[i]["point2segment"] = torch.from_numpy( + original_labels[i][:, 2] + ).long() + else: + target = [] + target_full = [] + coordinates = [] + features = [] + + if "train" not in mode: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + full_res_coords, + target_full, + original_colors, + original_normals, + original_coordinates, + idx, + ), + target, + 
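+            # sample[3] below carries the scene filename for each element of the batch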
[sample[3] for sample in batch], + ) + else: + return ( + NoGpu( + coordinates, + features, + original_labels, + inverse_maps, + full_res_coords, + ), + target, + [sample[3] for sample in batch], + ) + + +def get_instance_masks( + list_labels, + task, + list_segments=None, + ignore_class_threshold=100, + filter_out_classes=[], + label_offset=0, +): + target = [] + + for batch_id in range(len(list_labels)): + label_ids = [] + masks = [] + segment_masks = [] + instance_ids = list_labels[batch_id][:, 1].unique() + + for instance_id in instance_ids: + if instance_id == -1: + continue + + # TODO is it possible that a ignore class (255) is an instance??? + # instance == -1 ??? + tmp = list_labels[batch_id][ + list_labels[batch_id][:, 1] == instance_id + ] + label_id = tmp[0, 0] + + if ( + label_id in filter_out_classes + ): # floor, wall, undefined==255 is not included + continue + + if ( + 255 in filter_out_classes + and label_id.item() == 255 + and tmp.shape[0] < ignore_class_threshold + ): + continue + + label_ids.append(label_id) + masks.append(list_labels[batch_id][:, 1] == instance_id) + + if list_segments: + segment_mask = torch.zeros( + list_segments[batch_id].shape[0] + ).bool() + segment_mask[ + list_labels[batch_id][ + list_labels[batch_id][:, 1] == instance_id + ][:, 2].unique() + ] = True + segment_masks.append(segment_mask) + + if len(label_ids) == 0: + return list() + + label_ids = torch.stack(label_ids) + masks = torch.stack(masks) + if list_segments: + segment_masks = torch.stack(segment_masks) + + if task == "semantic_segmentation": + new_label_ids = [] + new_masks = [] + new_segment_masks = [] + for label_id in label_ids.unique(): + masking = label_ids == label_id + + new_label_ids.append(label_id) + new_masks.append(masks[masking, :].sum(dim=0).bool()) + + if list_segments: + new_segment_masks.append( + segment_masks[masking, :].sum(dim=0).bool() + ) + + label_ids = torch.stack(new_label_ids) + masks = torch.stack(new_masks) + + if list_segments: + segment_masks = torch.stack(new_segment_masks) + + target.append( + { + "labels": label_ids, + "masks": masks, + "segment_mask": segment_masks, + } + ) + else: + target.append({"labels": label_ids, "masks": masks}) + else: + l = torch.clamp(label_ids - label_offset, min=0) + + if list_segments: + target.append( + { + "labels": l, + "masks": masks, + "segment_mask": segment_masks, + } + ) + else: + target.append({"labels": l, "masks": masks}) + return target + + +def make_crops(batch): + new_batch = [] + # detupling + for scene in batch: + new_batch.append([scene[0], scene[1], scene[2]]) + batch = new_batch + new_batch = [] + for scene in batch: + # move to center for better quadrant split + scene[0][:, :3] -= scene[0][:, :3].mean(0) + + # BUGFIX - there always would be a point in every quadrant + scene[0] = np.vstack( + ( + scene[0], + np.array( + [ + [0.1, 0.1, 0.1], + [0.1, -0.1, 0.1], + [-0.1, 0.1, 0.1], + [-0.1, -0.1, 0.1], + ] + ), + ) + ) + scene[1] = np.vstack((scene[1], np.zeros((4, scene[1].shape[1])))) + scene[2] = np.concatenate( + (scene[2], np.full_like((scene[2]), 255)[:4]) + ) + + crop = scene[0][:, 0] > 0 + crop &= scene[0][:, 1] > 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + crop = scene[0][:, 0] > 0 + crop &= scene[0][:, 1] < 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + crop = scene[0][:, 0] < 0 + crop &= scene[0][:, 1] > 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + 
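+        # -x -y quadrant (same pattern as the three crops above)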
crop = scene[0][:, 0] < 0 + crop &= scene[0][:, 1] < 0 + if crop.size > 1: + new_batch.append([scene[0][crop], scene[1][crop], scene[2][crop]]) + + # moving all of them to center + for i in range(len(new_batch)): + new_batch[i][0][:, :3] -= new_batch[i][0][:, :3].mean(0) + return new_batch + + +class NoGpu: + def __init__( + self, + coordinates, + features, + original_labels=None, + inverse_maps=None, + full_res_coords=None, + target_full=None, + original_colors=None, + original_normals=None, + original_coordinates=None, + idx=None, + ): + """helper class to prevent gpu loading on lightning""" + self.coordinates = coordinates + self.features = features + self.original_labels = original_labels + self.inverse_maps = inverse_maps + self.full_res_coords = full_res_coords + self.target_full = target_full + self.original_colors = original_colors + self.original_normals = original_normals + self.original_coordinates = original_coordinates + self.idx = idx + + +class NoGpuMask: + def __init__( + self, + coordinates, + features, + original_labels=None, + inverse_maps=None, + masks=None, + labels=None, + ): + """helper class to prevent gpu loading on lightning""" + self.coordinates = coordinates + self.features = features + self.original_labels = original_labels + self.inverse_maps = inverse_maps + + self.masks = masks + self.labels = labels diff --git a/models/Mask3D/mask3d/main_instance_segmentation.py b/models/Mask3D/mask3d/main_instance_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..c2664673cb3a1fa16191e7baa82a50bbb8f5f195 --- /dev/null +++ b/models/Mask3D/mask3d/main_instance_segmentation.py @@ -0,0 +1,114 @@ +import logging +import os +from hashlib import md5 +from uuid import uuid4 +import hydra +from dotenv import load_dotenv +from omegaconf import DictConfig, OmegaConf +from trainer.trainer import InstanceSegmentation, RegularCheckpointing +from pytorch_lightning.callbacks import ModelCheckpoint +from utils.utils import ( + flatten_dict, + load_baseline_model, + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) +from pytorch_lightning import Trainer, seed_everything + + +def get_parameters(cfg: DictConfig): + logger = logging.getLogger(__name__) + load_dotenv(".env") + + # parsing input parameters + seed_everything(cfg.general.seed) + + # getting basic configuration + if cfg.general.get("gpus", None) is None: + cfg.general.gpus = os.environ.get("CUDA_VISIBLE_DEVICES", None) + loggers = [] + + # cfg.general.experiment_id = "0" # str(Repo("./").commit())[:8] + # params = flatten_dict(OmegaConf.to_container(cfg, resolve=True)) + + # create unique id for experiments that are run locally + # unique_id = "_" + str(uuid4())[:4] + # cfg.general.version = md5(str(params).encode("utf-8")).hexdigest()[:8] + unique_id + + if not os.path.exists(cfg.general.save_dir): + os.makedirs(cfg.general.save_dir) + else: + print("EXPERIMENT ALREADY EXIST") + cfg["trainer"][ + "resume_from_checkpoint" + ] = f"{cfg.general.save_dir}/last-epoch.ckpt" + + for log in cfg.logging: + print(log) + # loggers.append(hydra.utils.instantiate(log)) + # loggers[-1].log_hyperparams( + # flatten_dict(OmegaConf.to_container(cfg, resolve=True)) + # ) + + model = InstanceSegmentation(cfg) + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, 
model) + + logger.info(flatten_dict(OmegaConf.to_container(cfg, resolve=True))) + return cfg, model, loggers + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def train(cfg: DictConfig): + os.chdir(hydra.utils.get_original_cwd()) + cfg, model, loggers = get_parameters(cfg) + callbacks = [] + for cb in cfg.callbacks: + callbacks.append(hydra.utils.instantiate(cb)) + + callbacks.append(RegularCheckpointing()) + + runner = Trainer( + logger=loggers, + gpus=cfg.general.gpus, + callbacks=callbacks, + weights_save_path=str(cfg.general.save_dir), + **cfg.trainer, + ) + runner.fit(model) + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def test(cfg: DictConfig): + # because hydra wants to change dir for some reason + os.chdir(hydra.utils.get_original_cwd()) + cfg, model, loggers = get_parameters(cfg) + runner = Trainer( + gpus=cfg.general.gpus, + logger=loggers, + weights_save_path=str(cfg.general.save_dir), + **cfg.trainer, + ) + runner.test(model) + + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def main(cfg: DictConfig): + if cfg["general"]["train_mode"]: + train(cfg) + else: + test(cfg) + + +if __name__ == "__main__": + main() diff --git a/models/Mask3D/mask3d/models/__init__.py b/models/Mask3D/mask3d/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b092c965bba4c734b49a7f4d2e3ab6fee8471d17 --- /dev/null +++ b/models/Mask3D/mask3d/models/__init__.py @@ -0,0 +1,44 @@ +import mask3d.models.resunet as resunet +import mask3d.models.res16unet as res16unet +from mask3d.models.res16unet import ( + Res16UNet34C, + Res16UNet34A, + Res16UNet14A, + Res16UNet34D, + Res16UNet18D, + Res16UNet18B, + Custom30M, +) +from mask3d.models.mask3d import Mask3D + +MODELS = [] + + +def add_models(module): + MODELS.extend([getattr(module, a) for a in dir(module) if "Net" in a]) + + +add_models(resunet) +add_models(res16unet) +add_models(mask3d) + + +def get_models(): + """Returns a tuple of sample models.""" + return MODELS + + +def load_model(name): + """Creates and returns an instance of the model given its class name.""" + # Find the model class from its name + all_models = get_models() + mdict = {model.__name__: model for model in all_models} + if name not in mdict: + print("Invalid model index. Options are:") + # Display a list of valid model names + for model in all_models: + print(f"\t* {model.__name__}") + return None + NetClass = mdict[name] + + return NetClass diff --git a/models/Mask3D/mask3d/models/criterion.py b/models/Mask3D/mask3d/models/criterion.py new file mode 100644 index 0000000000000000000000000000000000000000..19ce8bc8ecf4a0be08ce91e45857412a8d55efba --- /dev/null +++ b/models/Mask3D/mask3d/models/criterion.py @@ -0,0 +1,343 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py +# Modified for Mask3D +""" +MaskFormer criterion. 
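+Hungarian-matched classification (cross-entropy), binary cross-entropy mask, and dice losses for Mask3D.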
+""" + +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.utils.comm import get_world_size +from detectron2.projects.point_rend.point_features import ( + get_uncertain_point_coords_with_randomness, + point_sample, +) + +from mask3d.models.misc import ( + is_dist_avail_and_initialized, + nested_tensor_from_tensor_list, +) + + +def dice_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + num_masks: float, +): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(-1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_masks + + +dice_loss_jit = torch.jit.script(dice_loss) # type: torch.jit.ScriptModule + + +def sigmoid_ce_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + num_masks: float, +): + """ + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + Returns: + Loss tensor + """ + loss = F.binary_cross_entropy_with_logits( + inputs, targets, reduction="none" + ) + + return loss.mean(1).sum() / num_masks + + +sigmoid_ce_loss_jit = torch.jit.script( + sigmoid_ce_loss +) # type: torch.jit.ScriptModule + + +def calculate_uncertainty(logits): + """ + We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the + foreground class in `classes`. + Args: + logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images and C is + the number of foreground classes. The values are logits. + Returns: + scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + assert logits.shape[1] == 1 + gt_class_logits = logits.clone() + return -(torch.abs(gt_class_logits)) + + +class SetCriterion(nn.Module): + """This class computes the loss for DETR. + The process happens in two steps: + 1) we compute hungarian assignment between ground truth boxes and the outputs of the model + 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) + """ + + def __init__( + self, + num_classes, + matcher, + weight_dict, + eos_coef, + losses, + num_points, + oversample_ratio, + importance_sample_ratio, + class_weights, + ): + """Create the criterion. + Parameters: + num_classes: number of object categories, omitting the special no-object category + matcher: module able to compute a matching between targets and proposals + weight_dict: dict containing as key the names of the losses and as values their relative weight. + eos_coef: relative classification weight applied to the no-object category + losses: list of all the losses to be applied. See get_loss for list of available losses. 
+ """ + super().__init__() + self.num_classes = num_classes - 1 + self.class_weights = class_weights + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + self.losses = losses + empty_weight = torch.ones(self.num_classes + 1) + empty_weight[-1] = self.eos_coef + + if self.class_weights != -1: + assert ( + len(self.class_weights) == self.num_classes + ), "CLASS WEIGHTS DO NOT MATCH" + empty_weight[:-1] = torch.tensor(self.class_weights) + + self.register_buffer("empty_weight", empty_weight) + + # pointwise mask loss parameters + self.num_points = num_points + self.oversample_ratio = oversample_ratio + self.importance_sample_ratio = importance_sample_ratio + + def loss_labels(self, outputs, targets, indices, num_masks, mask_type): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + assert "pred_logits" in outputs + src_logits = outputs["pred_logits"].float() + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat( + [t["labels"][J] for t, (_, J) in zip(targets, indices)] + ) + target_classes = torch.full( + src_logits.shape[:2], + self.num_classes, + dtype=torch.int64, + device=src_logits.device, + ) + target_classes[idx] = target_classes_o + + loss_ce = F.cross_entropy( + src_logits.transpose(1, 2), + target_classes, + self.empty_weight, + ignore_index=253, + ) + losses = {"loss_ce": loss_ce} + return losses + + def loss_masks(self, outputs, targets, indices, num_masks, mask_type): + """Compute the losses related to the masks: the focal loss and the dice loss. + targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + """ + assert "pred_masks" in outputs + + loss_masks = [] + loss_dices = [] + + for batch_id, (map_id, target_id) in enumerate(indices): + map = outputs["pred_masks"][batch_id][:, map_id].T + target_mask = targets[batch_id][mask_type][target_id] + + if self.num_points != -1: + point_idx = torch.randperm( + target_mask.shape[1], device=target_mask.device + )[: int(self.num_points * target_mask.shape[1])] + else: + # sample all points + point_idx = torch.arange( + target_mask.shape[1], device=target_mask.device + ) + + num_masks = target_mask.shape[0] + map = map[:, point_idx] + target_mask = target_mask[:, point_idx].float() + + loss_masks.append(sigmoid_ce_loss_jit(map, target_mask, num_masks)) + loss_dices.append(dice_loss_jit(map, target_mask, num_masks)) + # del target_mask + return { + "loss_mask": torch.sum(torch.stack(loss_masks)), + "loss_dice": torch.sum(torch.stack(loss_dices)), + } + + src_idx = self._get_src_permutation_idx(indices) + tgt_idx = self._get_tgt_permutation_idx(indices) + src_masks = outputs["pred_masks"] + src_masks = src_masks[src_idx] + masks = [t[mask_type] for t in targets] + # TODO use valid to mask invalid areas due to padding in loss + target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() + target_masks = target_masks.to(src_masks) + target_masks = target_masks[tgt_idx] + + # No need to upsample predictions as we are using normalized coordinates :) + # N x 1 x H x W + src_masks = src_masks[:, None] + target_masks = target_masks[:, None] + + with torch.no_grad(): + # sample point_coords + point_coords = get_uncertain_point_coords_with_randomness( + src_masks, + lambda logits: calculate_uncertainty(logits), + self.num_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + # get gt labels + point_labels = point_sample( + target_masks, + 
point_coords, + align_corners=False, + ).squeeze(1) + + point_logits = point_sample( + src_masks, + point_coords, + align_corners=False, + ).squeeze(1) + + losses = { + "loss_mask": sigmoid_ce_loss_jit( + point_logits, point_labels, num_masks, mask_type + ), + "loss_dice": dice_loss_jit( + point_logits, point_labels, num_masks, mask_type + ), + } + + del src_masks + del target_masks + return losses + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat( + [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] + ) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat( + [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)] + ) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def get_loss(self, loss, outputs, targets, indices, num_masks, mask_type): + loss_map = {"labels": self.loss_labels, "masks": self.loss_masks} + assert loss in loss_map, f"do you really want to compute {loss} loss?" + return loss_map[loss](outputs, targets, indices, num_masks, mask_type) + + def forward(self, outputs, targets, mask_type): + """This performs the loss computation. + Parameters: + outputs: dict of tensors, see the output specification of the model for the format + targets: list of dicts, such that len(targets) == batch_size. + The expected keys in each dict depends on the losses applied, see each loss' doc + """ + outputs_without_aux = { + k: v for k, v in outputs.items() if k != "aux_outputs" + } + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets, mask_type) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_masks = sum(len(t["labels"]) for t in targets) + num_masks = torch.as_tensor( + [num_masks], + dtype=torch.float, + device=next(iter(outputs.values())).device, + ) + if is_dist_avail_and_initialized(): + torch.distributed.all_reduce(num_masks) + num_masks = torch.clamp(num_masks / get_world_size(), min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update( + self.get_loss( + loss, outputs, targets, indices, num_masks, mask_type + ) + ) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
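+        # Each intermediate layer's losses are stored under keys suffixed with "_{i}" so they stay distinguishable from the final-layer losses.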
+ if "aux_outputs" in outputs: + for i, aux_outputs in enumerate(outputs["aux_outputs"]): + indices = self.matcher(aux_outputs, targets, mask_type) + for loss in self.losses: + l_dict = self.get_loss( + loss, + aux_outputs, + targets, + indices, + num_masks, + mask_type, + ) + l_dict = {k + f"_{i}": v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + def __repr__(self): + head = "Criterion " + self.__class__.__name__ + body = [ + "matcher: {}".format(self.matcher.__repr__(_repr_indent=8)), + "losses: {}".format(self.losses), + "weight_dict: {}".format(self.weight_dict), + "num_classes: {}".format(self.num_classes), + "eos_coef: {}".format(self.eos_coef), + "num_points: {}".format(self.num_points), + "oversample_ratio: {}".format(self.oversample_ratio), + "importance_sample_ratio: {}".format(self.importance_sample_ratio), + ] + _repr_indent = 4 + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) diff --git a/models/Mask3D/mask3d/models/mask3d.py b/models/Mask3D/mask3d/models/mask3d.py new file mode 100644 index 0000000000000000000000000000000000000000..b7cd4c7a6a74b44df90bbd8d668c7def474f2b10 --- /dev/null +++ b/models/Mask3D/mask3d/models/mask3d.py @@ -0,0 +1,870 @@ +import torch +import hydra +import torch.nn as nn +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine.MinkowskiPooling import MinkowskiAvgPooling +import numpy as np +from torch.nn import functional as F +from mask3d.models.modules.common import conv +from mask3d.models.position_embedding import PositionEmbeddingCoordsSine +from mask3d.models.modules.helpers_3detr import GenericMLP +from torch_scatter import scatter_mean, scatter_max, scatter_min +from torch.cuda.amp import autocast + +from pointnet2.pointnet2_utils import furthest_point_sample + + +class Mask3D(nn.Module): + def __init__( + self, + config, + hidden_dim, + num_queries, + num_heads, + dim_feedforward, + sample_sizes, + shared_decoder, + num_classes, + num_decoders, + dropout, + pre_norm, + positional_encoding_type, + non_parametric_queries, + train_on_segments, + normalize_pos_enc, + use_level_embed, + scatter_type, + hlevels, + use_np_features, + voxel_size, + max_sample_size, + random_queries, + gauss_scale, + random_query_both, + random_normal, + ): + super().__init__() + self.random_normal = random_normal + self.random_query_both = random_query_both + self.random_queries = random_queries + self.max_sample_size = max_sample_size + self.gauss_scale = gauss_scale + self.voxel_size = voxel_size + self.scatter_type = scatter_type + self.hlevels = hlevels + self.use_level_embed = use_level_embed + self.train_on_segments = train_on_segments + self.normalize_pos_enc = normalize_pos_enc + self.num_decoders = num_decoders + self.num_classes = num_classes + self.dropout = dropout + self.pre_norm = pre_norm + self.shared_decoder = shared_decoder + self.sample_sizes = sample_sizes + self.non_parametric_queries = non_parametric_queries + self.use_np_features = use_np_features + self.mask_dim = hidden_dim + self.num_heads = num_heads + self.num_queries = num_queries + self.pos_enc_type = positional_encoding_type + + self.backbone = hydra.utils.instantiate(config.backbone) + self.num_levels = len(self.hlevels) + sizes = self.backbone.PLANES[-5:] + + self.mask_features_head = conv( + self.backbone.PLANES[7], + self.mask_dim, + kernel_size=1, + stride=1, + bias=True, + D=3, + ) + + if self.scatter_type == "mean": + self.scatter_fn = scatter_mean + elif self.scatter_type == "max": + self.scatter_fn = 
lambda mask, p2s, dim: scatter_max( + mask, p2s, dim=dim + )[0] + else: + assert False, "Scatter function not known" + + assert ( + not use_np_features + ) or non_parametric_queries, "np features only with np queries" + + if self.non_parametric_queries: + self.query_projection = GenericMLP( + input_dim=self.mask_dim, + hidden_dims=[self.mask_dim], + output_dim=self.mask_dim, + use_conv=True, + output_use_activation=True, + hidden_use_bias=True, + ) + + if self.use_np_features: + self.np_feature_projection = nn.Sequential( + nn.Linear(sizes[-1], hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, hidden_dim), + ) + elif self.random_query_both: + self.query_projection = GenericMLP( + input_dim=2 * self.mask_dim, + hidden_dims=[2 * self.mask_dim], + output_dim=2 * self.mask_dim, + use_conv=True, + output_use_activation=True, + hidden_use_bias=True, + ) + else: + # PARAMETRIC QUERIES + # learnable query features + self.query_feat = nn.Embedding(num_queries, hidden_dim) + # learnable query p.e. + self.query_pos = nn.Embedding(num_queries, hidden_dim) + + if self.use_level_embed: + # learnable scale-level embedding + self.level_embed = nn.Embedding(self.num_levels, hidden_dim) + + self.mask_embed_head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, hidden_dim), + ) + + self.class_embed_head = nn.Linear(hidden_dim, self.num_classes) + + if self.pos_enc_type == "legacy": + self.pos_enc = PositionalEncoding3D(channels=self.mask_dim) + elif self.pos_enc_type == "fourier": + self.pos_enc = PositionEmbeddingCoordsSine( + pos_type="fourier", + d_pos=self.mask_dim, + gauss_scale=self.gauss_scale, + normalize=self.normalize_pos_enc, + ) + elif self.pos_enc_type == "sine": + self.pos_enc = PositionEmbeddingCoordsSine( + pos_type="sine", + d_pos=self.mask_dim, + normalize=self.normalize_pos_enc, + ) + else: + assert False, "pos enc type not known" + + self.pooling = MinkowskiAvgPooling( + kernel_size=2, stride=2, dimension=3 + ) + + self.masked_transformer_decoder = nn.ModuleList() + self.cross_attention = nn.ModuleList() + self.self_attention = nn.ModuleList() + self.ffn_attention = nn.ModuleList() + self.lin_squeeze = nn.ModuleList() + + num_shared = self.num_decoders if not self.shared_decoder else 1 + + for _ in range(num_shared): + tmp_cross_attention = nn.ModuleList() + tmp_self_attention = nn.ModuleList() + tmp_ffn_attention = nn.ModuleList() + tmp_squeeze_attention = nn.ModuleList() + for i, hlevel in enumerate(self.hlevels): + tmp_cross_attention.append( + CrossAttentionLayer( + d_model=self.mask_dim, + nhead=self.num_heads, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + tmp_squeeze_attention.append( + nn.Linear(sizes[hlevel], self.mask_dim) + ) + + tmp_self_attention.append( + SelfAttentionLayer( + d_model=self.mask_dim, + nhead=self.num_heads, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + tmp_ffn_attention.append( + FFNLayer( + d_model=self.mask_dim, + dim_feedforward=dim_feedforward, + dropout=self.dropout, + normalize_before=self.pre_norm, + ) + ) + + self.cross_attention.append(tmp_cross_attention) + self.self_attention.append(tmp_self_attention) + self.ffn_attention.append(tmp_ffn_attention) + self.lin_squeeze.append(tmp_squeeze_attention) + + self.decoder_norm = nn.LayerNorm(hidden_dim) + + def get_pos_encs(self, coords): + pos_encodings_pcd = [] + + for i in range(len(coords)): + pos_encodings_pcd.append([[]]) + for coords_batch in coords[i].decomposed_features: + scene_min = 
coords_batch.min(dim=0)[0][None, ...] + scene_max = coords_batch.max(dim=0)[0][None, ...] + + with autocast(enabled=False): + tmp = self.pos_enc( + coords_batch[None, ...].float(), + input_range=[scene_min, scene_max], + ) + + pos_encodings_pcd[-1][0].append(tmp.squeeze(0).permute((1, 0))) + + return pos_encodings_pcd + + def forward( + self, x, point2segment=None, raw_coordinates=None, is_eval=False + ): + # print(x) + pcd_features, aux = self.backbone(x) + + batch_size = len(x.decomposed_coordinates) + + with torch.no_grad(): + coordinates = me.SparseTensor( + features=raw_coordinates, + coordinate_manager=aux[-1].coordinate_manager, + coordinate_map_key=aux[-1].coordinate_map_key, + device=aux[-1].device, + ) + + coords = [coordinates] + for _ in reversed(range(len(aux) - 1)): + coords.append(self.pooling(coords[-1])) + + coords.reverse() + + pos_encodings_pcd = self.get_pos_encs(coords) + mask_features = self.mask_features_head(pcd_features) + if point2segment is not None: + mask_segments = [] + for i, mask_feature in enumerate( + mask_features.decomposed_features + ): + mask_segments.append( + self.scatter_fn(mask_feature, point2segment[i], dim=0) + ) + + sampled_coords = None + + if self.non_parametric_queries: + fps_idx = [ + furthest_point_sample( + x.decomposed_coordinates[i][None, ...].float(), + self.num_queries, + ) + .squeeze(0) + .long() + for i in range(len(x.decomposed_coordinates)) + ] + + sampled_coords = torch.stack( + [ + coordinates.decomposed_features[i][fps_idx[i].long(), :] + for i in range(len(fps_idx)) + ] + ) + + mins = torch.stack( + [ + coordinates.decomposed_features[i].min(dim=0)[0] + for i in range(len(coordinates.decomposed_features)) + ] + ) + maxs = torch.stack( + [ + coordinates.decomposed_features[i].max(dim=0)[0] + for i in range(len(coordinates.decomposed_features)) + ] + ) + + query_pos = self.pos_enc( + sampled_coords.float(), input_range=[mins, maxs] + ) # Batch, Dim, queries + query_pos = self.query_projection(query_pos) + + if not self.use_np_features: + queries = torch.zeros_like(query_pos).permute((0, 2, 1)) + else: + queries = torch.stack( + [ + pcd_features.decomposed_features[i][ + fps_idx[i].long(), : + ] + for i in range(len(fps_idx)) + ] + ) + queries = self.np_feature_projection(queries) + query_pos = query_pos.permute((2, 0, 1)) + elif self.random_queries: + query_pos = ( + torch.rand( + batch_size, + self.mask_dim, + self.num_queries, + device=x.device, + ) + - 0.5 + ) + + queries = torch.zeros_like(query_pos).permute((0, 2, 1)) + query_pos = query_pos.permute((2, 0, 1)) + elif self.random_query_both: + if not self.random_normal: + query_pos_feat = ( + torch.rand( + batch_size, + 2 * self.mask_dim, + self.num_queries, + device=x.device, + ) + - 0.5 + ) + else: + query_pos_feat = torch.randn( + batch_size, + 2 * self.mask_dim, + self.num_queries, + device=x.device, + ) + + queries = query_pos_feat[:, : self.mask_dim, :].permute((0, 2, 1)) + query_pos = query_pos_feat[:, self.mask_dim :, :].permute( + (2, 0, 1) + ) + else: + # PARAMETRIC QUERIES + queries = self.query_feat.weight.unsqueeze(0).repeat( + batch_size, 1, 1 + ) + query_pos = self.query_pos.weight.unsqueeze(1).repeat( + 1, batch_size, 1 + ) + + predictions_class = [] + predictions_mask = [] + + for decoder_counter in range(self.num_decoders): + if self.shared_decoder: + decoder_counter = 0 + for i, hlevel in enumerate(self.hlevels): + if point2segment is not None: + output_class, outputs_mask, attn_mask = self.mask_module( + queries, + mask_features, + mask_segments, + 
len(aux) - hlevel - 1, + ret_attn_mask=True, + point2segment=point2segment, + coords=coords, + ) + else: + output_class, outputs_mask, attn_mask = self.mask_module( + queries, + mask_features, + None, + len(aux) - hlevel - 1, + ret_attn_mask=True, + point2segment=None, + coords=coords, + ) + + decomposed_aux = aux[hlevel].decomposed_features + decomposed_attn = attn_mask.decomposed_features + + curr_sample_size = max( + [pcd.shape[0] for pcd in decomposed_aux] + ) + + if min([pcd.shape[0] for pcd in decomposed_aux]) == 1: + raise RuntimeError( + "only a single point gives nans in cross-attention" + ) + + if not (self.max_sample_size or is_eval): + curr_sample_size = min( + curr_sample_size, self.sample_sizes[hlevel] + ) + + rand_idx = [] + mask_idx = [] + for k in range(len(decomposed_aux)): + pcd_size = decomposed_aux[k].shape[0] + if pcd_size <= curr_sample_size: + # we do not need to sample + # take all points and pad the rest with zeroes and mask it + idx = torch.zeros( + curr_sample_size, + dtype=torch.long, + device=queries.device, + ) + + midx = torch.ones( + curr_sample_size, + dtype=torch.bool, + device=queries.device, + ) + + idx[:pcd_size] = torch.arange( + pcd_size, device=queries.device + ) + + midx[:pcd_size] = False # attend to first points + else: + # we have more points in pcd as we like to sample + # take a subset (no padding or masking needed) + idx = torch.randperm( + decomposed_aux[k].shape[0], device=queries.device + )[:curr_sample_size] + midx = torch.zeros( + curr_sample_size, + dtype=torch.bool, + device=queries.device, + ) # attend to all + + rand_idx.append(idx) + mask_idx.append(midx) + + batched_aux = torch.stack( + [ + decomposed_aux[k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_attn = torch.stack( + [ + decomposed_attn[k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_pos_enc = torch.stack( + [ + pos_encodings_pcd[hlevel][0][k][rand_idx[k], :] + for k in range(len(rand_idx)) + ] + ) + + batched_attn.permute((0, 2, 1))[ + batched_attn.sum(1) == rand_idx[0].shape[0] + ] = False + + m = torch.stack(mask_idx) + batched_attn = torch.logical_or(batched_attn, m[..., None]) + + src_pcd = self.lin_squeeze[decoder_counter][i]( + batched_aux.permute((1, 0, 2)) + ) + if self.use_level_embed: + src_pcd += self.level_embed.weight[i] + + output = self.cross_attention[decoder_counter][i]( + queries.permute((1, 0, 2)), + src_pcd, + memory_mask=batched_attn.repeat_interleave( + self.num_heads, dim=0 + ).permute((0, 2, 1)), + memory_key_padding_mask=None, # here we do not apply masking on padded region + pos=batched_pos_enc.permute((1, 0, 2)), + query_pos=query_pos, + ) + + output = self.self_attention[decoder_counter][i]( + output, + tgt_mask=None, + tgt_key_padding_mask=None, + query_pos=query_pos, + ) + + # FFN + queries = self.ffn_attention[decoder_counter][i]( + output + ).permute((1, 0, 2)) + + predictions_class.append(output_class) + predictions_mask.append(outputs_mask) + + if point2segment is not None: + output_class, outputs_mask = self.mask_module( + queries, + mask_features, + mask_segments, + 0, + ret_attn_mask=False, + point2segment=point2segment, + coords=coords, + ) + else: + output_class, outputs_mask = self.mask_module( + queries, + mask_features, + None, + 0, + ret_attn_mask=False, + point2segment=None, + coords=coords, + ) + predictions_class.append(output_class) + predictions_mask.append(outputs_mask) + + return { + "pred_logits": predictions_class[-1], + "pred_masks": predictions_mask[-1], + "aux_outputs": 
self._set_aux_loss( + predictions_class, predictions_mask + ), + "sampled_coords": sampled_coords.detach().cpu().numpy() + if sampled_coords is not None + else None, + "backbone_features": pcd_features, + } + + def mask_module( + self, + query_feat, + mask_features, + mask_segments, + num_pooling_steps, + ret_attn_mask=True, + point2segment=None, + coords=None, + ): + query_feat = self.decoder_norm(query_feat) + mask_embed = self.mask_embed_head(query_feat) + outputs_class = self.class_embed_head(query_feat) + + output_masks = [] + + if point2segment is not None: + output_segments = [] + for i in range(len(mask_segments)): + output_segments.append(mask_segments[i] @ mask_embed[i].T) + output_masks.append(output_segments[-1][point2segment[i]]) + else: + for i in range(mask_features.C[-1, 0] + 1): + output_masks.append( + mask_features.decomposed_features[i] @ mask_embed[i].T + ) + + output_masks = torch.cat(output_masks) + outputs_mask = me.SparseTensor( + features=output_masks, + coordinate_manager=mask_features.coordinate_manager, + coordinate_map_key=mask_features.coordinate_map_key, + ) + + if ret_attn_mask: + attn_mask = outputs_mask + for _ in range(num_pooling_steps): + attn_mask = self.pooling(attn_mask.float()) + + attn_mask = me.SparseTensor( + features=(attn_mask.F.detach().sigmoid() < 0.5), + coordinate_manager=attn_mask.coordinate_manager, + coordinate_map_key=attn_mask.coordinate_map_key, + ) + + if point2segment is not None: + return outputs_class, output_segments, attn_mask + else: + return ( + outputs_class, + outputs_mask.decomposed_features, + attn_mask, + ) + + if point2segment is not None: + return outputs_class, output_segments + else: + return outputs_class, outputs_mask.decomposed_features + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_seg_masks): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [ + {"pred_logits": a, "pred_masks": b} + for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) + ] + + +class PositionalEncoding3D(nn.Module): + def __init__(self, channels): + """ + :param channels: The last dimension of the tensor you want to apply pos emb to. 
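+        The channel budget is split across per-axis sine/cosine encodings (x, y, z); the concatenated embedding is trimmed back to the requested width.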
+ """ + self.orig_ch = channels + super(PositionalEncoding3D, self).__init__() + channels = int(np.ceil(channels / 6) * 2) + if channels % 2: + channels += 1 + self.channels = channels + inv_freq = 1.0 / ( + 10000 ** (torch.arange(0, channels, 2).float() / channels) + ) + self.register_buffer("inv_freq", inv_freq) + + def forward(self, tensor, input_range=None): + """ + :param tensor: A 5d tensor of size (batch_size, x, y, z, ch) + :return: Positional Encoding Matrix of size (batch_size, x, y, z, ch) + """ + pos_x, pos_y, pos_z = tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2] + sin_inp_x = torch.einsum("bi,j->bij", pos_x, self.inv_freq) + sin_inp_y = torch.einsum("bi,j->bij", pos_y, self.inv_freq) + sin_inp_z = torch.einsum("bi,j->bij", pos_z, self.inv_freq) + emb_x = torch.cat((sin_inp_x.sin(), sin_inp_x.cos()), dim=-1) + + emb_y = torch.cat((sin_inp_y.sin(), sin_inp_y.cos()), dim=-1) + emb_z = torch.cat((sin_inp_z.sin(), sin_inp_z.cos()), dim=-1) + + emb = torch.cat((emb_x, emb_y, emb_z), dim=-1) + return emb[:, :, : self.orig_ch].permute((0, 2, 1)) + + +class SelfAttentionLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn( + q, + k, + value=tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + tgt2 = self.norm(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn( + q, + k, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward( + self, tgt, tgt_mask=None, tgt_key_padding_mask=None, query_pos=None + ): + if self.normalize_before: + return self.forward_pre( + tgt, tgt_mask, tgt_key_padding_mask, query_pos + ) + return self.forward_post( + tgt, tgt_mask, tgt_key_padding_mask, query_pos + ) + + +class CrossAttentionLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.multihead_attn = nn.MultiheadAttention( + d_model, nhead, dropout=dropout + ) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt, query_pos), + 
key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + tgt2 = self.norm(tgt) + + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward( + self, + tgt, + memory, + memory_mask=None, + memory_key_padding_mask=None, + pos=None, + query_pos=None, + ): + if self.normalize_before: + return self.forward_pre( + tgt, + memory, + memory_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + return self.forward_post( + tgt, memory, memory_mask, memory_key_padding_mask, pos, query_pos + ) + + +class FFNLayer(nn.Module): + def __init__( + self, + d_model, + dim_feedforward=2048, + dropout=0.0, + activation="relu", + normalize_before=False, + ): + super().__init__() + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm = nn.LayerNorm(d_model) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt): + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + return tgt + + def forward_pre(self, tgt): + tgt2 = self.norm(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout(tgt2) + return tgt + + def forward(self, tgt): + if self.normalize_before: + return self.forward_pre(tgt) + return self.forward_post(tgt) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") diff --git a/models/Mask3D/mask3d/models/matcher.py b/models/Mask3D/mask3d/models/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0e7a05bb76a078b1c3c3b9c877054e439b584c --- /dev/null +++ b/models/Mask3D/mask3d/models/matcher.py @@ -0,0 +1,226 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py +""" +Modules to compute the matching cost and solve the corresponding LSAP. +""" +import torch +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment +from torch import nn +from torch.cuda.amp import autocast + +from detectron2.projects.point_rend.point_features import point_sample + + +def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. 
Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets) + denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :] + loss = 1 - (numerator + 1) / (denominator + 1) + return loss + + +batch_dice_loss_jit = torch.jit.script( + batch_dice_loss +) # type: torch.jit.ScriptModule + + +def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + Returns: + Loss tensor + """ + hw = inputs.shape[1] + + pos = F.binary_cross_entropy_with_logits( + inputs, torch.ones_like(inputs), reduction="none" + ) + neg = F.binary_cross_entropy_with_logits( + inputs, torch.zeros_like(inputs), reduction="none" + ) + + loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum( + "nc,mc->nm", neg, (1 - targets) + ) + + return loss / hw + + +batch_sigmoid_ce_loss_jit = torch.jit.script( + batch_sigmoid_ce_loss +) # type: torch.jit.ScriptModule + + +class HungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). + """ + + def __init__( + self, + cost_class: float = 1, + cost_mask: float = 1, + cost_dice: float = 1, + num_points: int = 0, + ): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost + cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_mask = cost_mask + self.cost_dice = cost_dice + + assert ( + cost_class != 0 or cost_mask != 0 or cost_dice != 0 + ), "all costs cant be 0" + + self.num_points = num_points + + @torch.no_grad() + def memory_efficient_forward(self, outputs, targets, mask_type): + """More memory-friendly matching""" + bs, num_queries = outputs["pred_logits"].shape[:2] + + indices = [] + + # Iterate through batch size + for b in range(bs): + + out_prob = outputs["pred_logits"][b].softmax( + -1 + ) # [num_queries, num_classes] + tgt_ids = targets[b]["labels"].clone() + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + filter_ignore = tgt_ids == 253 + tgt_ids[filter_ignore] = 0 + cost_class = -out_prob[:, tgt_ids] + cost_class[ + :, filter_ignore + ] = ( + -1.0 + ) # for ignore classes pretend perfect match ;) TODO better worst class match? 
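+            # Mask costs: sample one shared subset of points per scene, compute pairwise BCE and dice costs on it,
+            # then combine them with the class cost into C and solve the Hungarian assignment.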
+ + out_mask = outputs["pred_masks"][ + b + ].T # [num_queries, H_pred, W_pred] + # gt masks are already padded when preparing target + tgt_mask = targets[b][mask_type].to(out_mask) + + if self.num_points != -1: + point_idx = torch.randperm( + tgt_mask.shape[1], device=tgt_mask.device + )[: int(self.num_points * tgt_mask.shape[1])] + # point_idx = torch.randint(0, tgt_mask.shape[1], size=(self.num_points,), device=tgt_mask.device) + else: + # sample all points + point_idx = torch.arange( + tgt_mask.shape[1], device=tgt_mask.device + ) + + # out_mask = out_mask[:, None] + # tgt_mask = tgt_mask[:, None] + # all masks share the same set of points for efficient matching! + # point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device) + # get gt labels + # tgt_mask = point_sample( + # tgt_mask, + # point_coords.repeat(tgt_mask.shape[0], 1, 1), + # align_corners=False, + # ).squeeze(1) + + # out_mask = point_sample( + # out_mask, + # point_coords.repeat(out_mask.shape[0], 1, 1), + # align_corners=False, + # ).squeeze(1) + + with autocast(enabled=False): + out_mask = out_mask.float() + tgt_mask = tgt_mask.float() + # Compute the focal loss between masks + cost_mask = batch_sigmoid_ce_loss_jit( + out_mask[:, point_idx], tgt_mask[:, point_idx] + ) + + # Compute the dice loss betwen masks + cost_dice = batch_dice_loss_jit( + out_mask[:, point_idx], tgt_mask[:, point_idx] + ) + + # Final cost matrix + C = ( + self.cost_mask * cost_mask + + self.cost_class * cost_class + + self.cost_dice * cost_dice + ) + C = C.reshape(num_queries, -1).cpu() + + indices.append(linear_sum_assignment(C)) + + return [ + ( + torch.as_tensor(i, dtype=torch.int64), + torch.as_tensor(j, dtype=torch.int64), + ) + for i, j in indices + ] + + @torch.no_grad() + def forward(self, outputs, targets, mask_type): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + return self.memory_efficient_forward(outputs, targets, mask_type) + + def __repr__(self, _repr_indent=4): + head = "Matcher " + self.__class__.__name__ + body = [ + "cost_class: {}".format(self.cost_class), + "cost_mask: {}".format(self.cost_mask), + "cost_dice: {}".format(self.cost_dice), + ] + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) diff --git a/models/Mask3D/mask3d/models/metrics/__init__.py b/models/Mask3D/mask3d/models/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd7538b5868b93e4192dbee9ca0da9e91323cf0f --- /dev/null +++ b/models/Mask3D/mask3d/models/metrics/__init__.py @@ -0,0 +1,4 @@ +from .confusionmatrix import ConfusionMatrix +from .metrics 
import IoU + +__all__ = ["ConfusionMatrix", "IoU"] diff --git a/models/Mask3D/mask3d/models/metrics/confusionmatrix.py b/models/Mask3D/mask3d/models/metrics/confusionmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..2d92f12595d26f76f3c26d18550b1b1486b837ff --- /dev/null +++ b/models/Mask3D/mask3d/models/metrics/confusionmatrix.py @@ -0,0 +1,107 @@ +import numpy as np +import torch + + +class ConfusionMatrix: + """Constructs a confusion matrix for a multi-class classification problems. + + Does not support multi-label, multi-class problems. + + Keyword arguments: + - num_classes (int): number of classes in the classification problem. + - normalized (boolean, optional): Determines whether or not the confusion + matrix is normalized or not. Default: False. + + Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py + """ + + def __init__(self, num_classes, ignore_label): + super().__init__() + + self.conf = np.ndarray((num_classes, num_classes), dtype=np.int32) + self.ignore_label = ignore_label + self.num_classes = num_classes + self.reset() + + def reset(self): + self.conf.fill(0) + + def add(self, predicted, target): + """Computes the confusion matrix + + The shape of the confusion matrix is K x K, where K is the number + of classes. + + Keyword arguments: + - predicted (Tensor or numpy.ndarray): Can be an N x K tensor/array of + predicted scores obtained from the model for N examples and K classes, + or an N-tensor/array of integer values between 0 and K-1. + - target (Tensor or numpy.ndarray): Can be an N x K tensor/array of + ground-truth classes for N examples and K classes, or an N-tensor/array + of integer values between 0 and K-1. + + """ + # _, predicted = predicted.max(1) + + # predicted = predicted.view(-1) + # target = target.view(-1) + + # If target and/or predicted are tensors, convert them to numpy arrays + if torch.is_tensor(predicted): + predicted = predicted.cpu().numpy() + if torch.is_tensor(target): + target = target.cpu().numpy() + ind = ~np.isin(target, self.ignore_label) + predicted, target = predicted[ind], target[ind] + + assert ( + predicted.shape[0] == target.shape[0] + ), "number of targets and predicted outputs do not match" + + if np.ndim(predicted) != 1: + assert ( + predicted.shape[1] == self.num_classes + ), "number of predictions does not match size of confusion matrix" + predicted = np.argmax(predicted, 1) + else: + assert (predicted.max() < self.num_classes) and ( + predicted.min() >= 0 + ), "predicted values are not between 0 and k-1" + + if np.ndim(target) != 1: + assert ( + target.shape[1] == self.num_classes + ), "Onehot target does not match size of confusion matrix" + assert (target >= 0).all() and ( + target <= 1 + ).all(), "in one-hot encoding, target values should be 0 or 1" + assert ( + target.sum(1) == 1 + ).all(), "multi-label setting is not supported" + target = np.argmax(target, 1) + else: + assert (target.max() < self.num_classes) and ( + target.min() >= 0 + ), "target values are not between 0 and k-1" + + # hack for bincounting 2 arrays together + x = predicted + self.num_classes * target + bincount_2d = np.bincount( + x.astype(np.int32), minlength=self.num_classes**2 + ) + assert bincount_2d.size == self.num_classes**2 + conf = bincount_2d.reshape((self.num_classes, self.num_classes)) + + self.conf += conf + + def value(self, normalized=False): + """ + Returns: + Confustion matrix of K rows and K columns, where rows corresponds + to ground-truth targets and columns 
corresponds to predicted + targets. + """ + if normalized: + conf = self.conf.astype(np.float32) + return conf / conf.sum(1).clip(min=1e-12)[:, None] + return self.conf diff --git a/models/Mask3D/mask3d/models/metrics/metrics.py b/models/Mask3D/mask3d/models/metrics/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..f3f4b0ca4f7b0c5224ea242f459374a28485539f --- /dev/null +++ b/models/Mask3D/mask3d/models/metrics/metrics.py @@ -0,0 +1,48 @@ +import numpy as np + + +class IoU: + """Computes the intersection over union (IoU) per class and corresponding + mean (mIoU). + + Intersection over union (IoU) is a common evaluation metric for semantic + segmentation. The predictions are first accumulated in a confusion matrix + and the IoU is computed from it as follows: + + IoU = true_positive / (true_positive + false_positive + false_negative). + + Keyword arguments: + - num_classes (int): number of classes in the classification problem + - normalized (boolean, optional): Determines whether or not the confusion + matrix is normalized or not. Default: False. + - ignore_index (int or iterable, optional): Index of the classes to ignore + when computing the IoU. Can be an int, or any iterable of ints. + + Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter + + """ + + def __init__(self): + super().__init__() + + def value(self, conf_matrix): + """Computes the IoU and mean IoU. + + The mean computation ignores NaN elements of the IoU array. + + Returns: + Tuple: (IoU, mIoU). The first output is the per class IoU, + for K classes it's numpy.ndarray with K elements. The second output, + is the mean IoU. + """ + true_positive = np.diag(conf_matrix) + false_positive = np.sum(conf_matrix, 0) - true_positive + false_negative = np.sum(conf_matrix, 1) - true_positive + + # Just in case we get a division by 0, ignore/hide the error + with np.errstate(divide="ignore", invalid="ignore"): + iou = true_positive / ( + true_positive + false_positive + false_negative + ) + + return iou diff --git a/models/Mask3D/mask3d/models/misc.py b/models/Mask3D/mask3d/models/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..8416b62804fbc002bd02a457d896276bc307b070 --- /dev/null +++ b/models/Mask3D/mask3d/models/misc.py @@ -0,0 +1,119 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. 
+""" +from typing import List, Optional + +import torch +import torch.distributed as dist +import torchvision +from torch import Tensor + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + if torchvision._is_tracing(): + # nested_tensor_from_tensor_list() does not export well to ONNX + # call _onnx_nested_tensor_from_tensor_list() instead + return _onnx_nested_tensor_from_tensor_list(tensor_list) + + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("not supported") + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
+@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list( + tensor_list: List[Tensor], +) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max( + torch.stack([img.shape[i] for img in tensor_list]).to( + torch.float32 + ) + ).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad( + img, (0, padding[2], 0, padding[1], 0, padding[0]) + ) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad( + m, (0, padding[2], 0, padding[1]), "constant", 1 + ) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True diff --git a/models/Mask3D/mask3d/models/model.py b/models/Mask3D/mask3d/models/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d167fa58358f2c1a7ca4a509e38c61906e9dd7ac --- /dev/null +++ b/models/Mask3D/mask3d/models/model.py @@ -0,0 +1,27 @@ +from MinkowskiEngine import MinkowskiNetwork + + +class Model(MinkowskiNetwork): + """ + Base network for all sparse convnet + + By default, all networks are segmentation networks. + """ + + OUT_PIXEL_DIST = -1 + + def __init__(self, in_channels, out_channels, config, D, **kwargs): + super().__init__(D) + self.in_channels = in_channels + self.out_channels = out_channels + self.config = config + + +class HighDimensionalModel(Model): + """ + Base network for all spatio (temporal) chromatic sparse convnet + """ + + def __init__(self, in_channels, out_channels, config, D, **kwargs): + assert D > 4, "Num dimension smaller than 5" + super().__init__(in_channels, out_channels, config, D, **kwargs) diff --git a/models/Mask3D/mask3d/models/modules/3detr_helpers.py b/models/Mask3D/mask3d/models/modules/3detr_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3f7ea57c0266a9781cdfec9f59896d15750a9d --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/3detr_helpers.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
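+# This module provides a BatchNorm1d wrapper for sequence-first (HW x N x C)
+# tensors, string-keyed registries of norm/activation/weight-init functions,
+# and a configurable GenericMLP for building small MLP heads.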
+import torch.nn as nn +from functools import partial +import copy + + +class BatchNormDim1Swap(nn.BatchNorm1d): + """ + Used for nn.Transformer that uses a HW x N x C rep + """ + + def forward(self, x): + """ + x: HW x N x C + permute to N x C x HW + Apply BN on C + permute back + """ + hw, n, c = x.shape + x = x.permute(1, 2, 0) + x = super(BatchNormDim1Swap, self).forward(x) + # x: n x c x hw -> hw x n x c + x = x.permute(2, 0, 1) + return x + + +NORM_DICT = { + "bn": BatchNormDim1Swap, + "bn1d": nn.BatchNorm1d, + "id": nn.Identity, + "ln": nn.LayerNorm, +} + +ACTIVATION_DICT = { + "relu": nn.ReLU, + "gelu": nn.GELU, + "leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1), +} + +WEIGHT_INIT_DICT = { + "xavier_uniform": nn.init.xavier_uniform_, +} + + +class GenericMLP(nn.Module): + def __init__( + self, + input_dim, + hidden_dims, + output_dim, + norm_fn_name=None, + activation="relu", + use_conv=False, + dropout=None, + hidden_use_bias=False, + output_use_bias=True, + output_use_activation=False, + output_use_norm=False, + weight_init_name=None, + ): + super().__init__() + activation = ACTIVATION_DICT[activation] + norm = None + if norm_fn_name is not None: + norm = NORM_DICT[norm_fn_name] + if norm_fn_name == "ln" and use_conv: + norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm + + if dropout is not None: + if not isinstance(dropout, list): + dropout = [dropout for _ in range(len(hidden_dims))] + + layers = [] + prev_dim = input_dim + for idx, x in enumerate(hidden_dims): + if use_conv: + layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias) + else: + layer = nn.Linear(prev_dim, x, bias=hidden_use_bias) + layers.append(layer) + if norm: + layers.append(norm(x)) + layers.append(activation()) + if dropout is not None: + layers.append(nn.Dropout(p=dropout[idx])) + prev_dim = x + if use_conv: + layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias) + else: + layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias) + layers.append(layer) + + if output_use_norm: + layers.append(norm(output_dim)) + + if output_use_activation: + layers.append(activation()) + + self.layers = nn.Sequential(*layers) + + if weight_init_name is not None: + self.do_weight_init(weight_init_name) + + def do_weight_init(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for (_, param) in self.named_parameters(): + if param.dim() > 1: # skips batchnorm/layernorm + func(param) + + def forward(self, x): + output = self.layers(x) + return output + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) diff --git a/models/Mask3D/mask3d/models/modules/__init__.py b/models/Mask3D/mask3d/models/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/models/modules/common.py b/models/Mask3D/mask3d/models/modules/common.py new file mode 100644 index 0000000000000000000000000000000000000000..ae78b5b301cfd6ffcfc3417b543ebe2289602fb7 --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/common.py @@ -0,0 +1,275 @@ +import sys + +if sys.version_info[:2] >= (3, 8): + from collections.abc import Sequence +else: + from collections import Sequence + +from enum import Enum + +import torch.nn as nn +import MinkowskiEngine as ME + + +class NormType(Enum): + BATCH_NORM = 0 + INSTANCE_NORM = 1 + INSTANCE_BATCH_NORM = 2 + + +def get_norm(norm_type, n_channels, D, bn_momentum=0.1): + if norm_type == NormType.BATCH_NORM: + return 
ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum) + elif norm_type == NormType.INSTANCE_NORM: + return ME.MinkowskiInstanceNorm(n_channels) + elif norm_type == NormType.INSTANCE_BATCH_NORM: + return nn.Sequential( + ME.MinkowskiInstanceNorm(n_channels), + ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum), + ) + else: + raise ValueError(f"Norm type: {norm_type} not supported") + + +class ConvType(Enum): + """ + Define the kernel region type + """ + + HYPERCUBE = 0, "HYPERCUBE" + SPATIAL_HYPERCUBE = 1, "SPATIAL_HYPERCUBE" + SPATIO_TEMPORAL_HYPERCUBE = 2, "SPATIO_TEMPORAL_HYPERCUBE" + HYPERCROSS = 3, "HYPERCROSS" + SPATIAL_HYPERCROSS = 4, "SPATIAL_HYPERCROSS" + SPATIO_TEMPORAL_HYPERCROSS = 5, "SPATIO_TEMPORAL_HYPERCROSS" + SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = ( + 6, + "SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ", + ) + + def __new__(cls, value, name): + member = object.__new__(cls) + member._value_ = value + member.fullname = name + return member + + def __int__(self): + return self.value + + +# Convert the ConvType var to a RegionType var +conv_to_region_type = { + # kernel_size = [k, k, k, 1] + ConvType.HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPER_CUBE, + ConvType.HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CROSS, + ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CUBE, # JONAS CHANGE from HYBRID +} + +# int_to_region_type = {m.value: m for m in ME.RegionType} +int_to_region_type = {m: ME.RegionType(m) for m in range(3)} + + +def convert_region_type(region_type): + """ + Convert the integer region_type to the corresponding RegionType enum object. 
+ """ + return int_to_region_type[region_type] + + +def convert_conv_type(conv_type, kernel_size, D): + assert isinstance(conv_type, ConvType), "conv_type must be of ConvType" + region_type = conv_to_region_type[conv_type] + axis_types = None + if conv_type == ConvType.SPATIAL_HYPERCUBE: + # No temporal convolution + if isinstance(kernel_size, Sequence): + kernel_size = kernel_size[:3] + else: + kernel_size = [ + kernel_size, + ] * 3 + if D == 4: + kernel_size.append(1) + elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE: + # conv_type conversion already handled + assert D == 4 + elif conv_type == ConvType.HYPERCUBE: + # conv_type conversion already handled + pass + elif conv_type == ConvType.SPATIAL_HYPERCROSS: + if isinstance(kernel_size, Sequence): + kernel_size = kernel_size[:3] + else: + kernel_size = [ + kernel_size, + ] * 3 + if D == 4: + kernel_size.append(1) + elif conv_type == ConvType.HYPERCROSS: + # conv_type conversion already handled + pass + elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS: + # conv_type conversion already handled + assert D == 4 + elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: + # Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim + axis_types = [ + ME.RegionType.HYPER_CUBE, + ] * 3 + if D == 4: + axis_types.append(ME.RegionType.HYPER_CROSS) + return region_type, axis_types, kernel_size + + +def conv( + in_planes, + out_planes, + kernel_size, + stride=1, + dilation=1, + bias=False, + conv_type=ConvType.HYPERCUBE, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=None, # axis_types JONAS + dimension=D, + ) + + return ME.MinkowskiConvolution( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + bias=bias, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def conv_tr( + in_planes, + out_planes, + kernel_size, + upsample_stride=1, + dilation=1, + bias=False, + conv_type=ConvType.HYPERCUBE, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + upsample_stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiConvolutionTranspose( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=kernel_size, + stride=upsample_stride, + dilation=dilation, + bias=bias, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def avg_pool( + kernel_size, + stride=1, + dilation=1, + conv_type=ConvType.HYPERCUBE, + in_coords_key=None, + D=-1, +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiAvgPooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def avg_unpool( + kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1 +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + 
conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiAvgUnpooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) + + +def sum_pool( + kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1 +): + assert D > 0, "Dimension must be a positive integer" + region_type, axis_types, kernel_size = convert_conv_type( + conv_type, kernel_size, D + ) + kernel_generator = ME.KernelGenerator( + kernel_size, + stride, + dilation, + region_type=region_type, + axis_types=axis_types, + dimension=D, + ) + + return ME.MinkowskiSumPooling( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + kernel_generator=kernel_generator, + dimension=D, + ) diff --git a/models/Mask3D/mask3d/models/modules/helpers_3detr.py b/models/Mask3D/mask3d/models/modules/helpers_3detr.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3f7ea57c0266a9781cdfec9f59896d15750a9d --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/helpers_3detr.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch.nn as nn +from functools import partial +import copy + + +class BatchNormDim1Swap(nn.BatchNorm1d): + """ + Used for nn.Transformer that uses a HW x N x C rep + """ + + def forward(self, x): + """ + x: HW x N x C + permute to N x C x HW + Apply BN on C + permute back + """ + hw, n, c = x.shape + x = x.permute(1, 2, 0) + x = super(BatchNormDim1Swap, self).forward(x) + # x: n x c x hw -> hw x n x c + x = x.permute(2, 0, 1) + return x + + +NORM_DICT = { + "bn": BatchNormDim1Swap, + "bn1d": nn.BatchNorm1d, + "id": nn.Identity, + "ln": nn.LayerNorm, +} + +ACTIVATION_DICT = { + "relu": nn.ReLU, + "gelu": nn.GELU, + "leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1), +} + +WEIGHT_INIT_DICT = { + "xavier_uniform": nn.init.xavier_uniform_, +} + + +class GenericMLP(nn.Module): + def __init__( + self, + input_dim, + hidden_dims, + output_dim, + norm_fn_name=None, + activation="relu", + use_conv=False, + dropout=None, + hidden_use_bias=False, + output_use_bias=True, + output_use_activation=False, + output_use_norm=False, + weight_init_name=None, + ): + super().__init__() + activation = ACTIVATION_DICT[activation] + norm = None + if norm_fn_name is not None: + norm = NORM_DICT[norm_fn_name] + if norm_fn_name == "ln" and use_conv: + norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm + + if dropout is not None: + if not isinstance(dropout, list): + dropout = [dropout for _ in range(len(hidden_dims))] + + layers = [] + prev_dim = input_dim + for idx, x in enumerate(hidden_dims): + if use_conv: + layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias) + else: + layer = nn.Linear(prev_dim, x, bias=hidden_use_bias) + layers.append(layer) + if norm: + layers.append(norm(x)) + layers.append(activation()) + if dropout is not None: + layers.append(nn.Dropout(p=dropout[idx])) + prev_dim = x + if use_conv: + layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias) + else: + layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias) + layers.append(layer) + + if output_use_norm: + layers.append(norm(output_dim)) + + if output_use_activation: + layers.append(activation()) + + self.layers = nn.Sequential(*layers) + + if weight_init_name is not None: + self.do_weight_init(weight_init_name) + + def do_weight_init(self, weight_init_name): + 
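+        # Apply the selected initializer (e.g. xavier_uniform_) to every
+        # parameter with more than one dimension; 1-D parameters such as
+        # biases and norm weights keep their default initialization.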
func = WEIGHT_INIT_DICT[weight_init_name] + for (_, param) in self.named_parameters(): + if param.dim() > 1: # skips batchnorm/layernorm + func(param) + + def forward(self, x): + output = self.layers(x) + return output + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) diff --git a/models/Mask3D/mask3d/models/modules/resnet_block.py b/models/Mask3D/mask3d/models/modules/resnet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..ac16b72aa198964e343f57ad4f79193a22e830dc --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/resnet_block.py @@ -0,0 +1,157 @@ +import torch.nn as nn +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.modules.common import ConvType, NormType, conv, get_norm + + +class BasicBlockBase(nn.Module): + expansion = 1 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + + self.conv1 = conv( + inplanes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm1 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=1, + dilation=dilation, + bias=False, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicBlock(BasicBlockBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BasicBlockIN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class BasicBlockINBN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + + +class BottleneckBase(nn.Module): + expansion = 4 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + self.conv1 = conv(inplanes, planes, kernel_size=1, D=D) + self.norm1 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm( + self.NORM_TYPE, planes, D, bn_momentum=bn_momentum + ) + + self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D) + self.norm3 = get_norm( + self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum + ) + + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(BottleneckBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BottleneckIN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class BottleneckINBN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM diff 
--git a/models/Mask3D/mask3d/models/modules/resnet_block.py.tmp b/models/Mask3D/mask3d/models/modules/resnet_block.py.tmp new file mode 100644 index 0000000000000000000000000000000000000000..00dba24b9ab660fd2fc2b6f2f88c508d0b62db0b --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/resnet_block.py.tmp @@ -0,0 +1,149 @@ +import torch.nn as nn +from MinkowskiEngine import MinkowskiReLU + +from mix3d.models.modules.common import ConvType, NormType, conv, get_norm + + +class BasicBlockBase(nn.Module): + expansion = 1 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + + self.conv1 = conv( + inplanes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=1, + dilation=dilation, + bias=False, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicBlock(BasicBlockBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BasicBlockIN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class BasicBlockINBN(BasicBlockBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + + +class BottleneckBase(nn.Module): + expansion = 4 + NORM_TYPE = NormType.BATCH_NORM + + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + bn_momentum=0.1, + D=3, + ): + super().__init__() + self.conv1 = conv(inplanes, planes, kernel_size=1, D=D) + self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) + + self.conv2 = conv( + planes, + planes, + kernel_size=3, + stride=stride, + dilation=dilation, + conv_type=conv_type, + D=D, + ) + self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) + + self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D) + self.norm3 = get_norm( + self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum + ) + + self.relu = MinkowskiReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(BottleneckBase): + NORM_TYPE = NormType.BATCH_NORM + + +class BottleneckIN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_NORM + + +class BottleneckINBN(BottleneckBase): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM diff --git a/models/Mask3D/mask3d/models/modules/senet_block.py b/models/Mask3D/mask3d/models/modules/senet_block.py new file mode 100644 index 0000000000000000000000000000000000000000..130082738505c79d5ecddb010595a5a66b9d8509 --- /dev/null +++ b/models/Mask3D/mask3d/models/modules/senet_block.py @@ -0,0 +1,138 @@ +import torch.nn as nn 
+import MinkowskiEngine as ME + +from mix3d.models.modules.common import ConvType, NormType +from mix3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=16, D=-1): + # Global coords does not require coords_key + super().__init__() + self.fc = nn.Sequential( + ME.MinkowskiLinear(channel, channel // reduction), + ME.MinkowskiReLU(inplace=True), + ME.MinkowskiLinear(channel // reduction, channel), + ME.MinkowskiSigmoid(), + ) + self.pooling = ME.MinkowskiGlobalPooling(dimension=D) + self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D) + + def forward(self, x): + y = self.pooling(x) + y = self.fc(y) + return self.broadcast_mul(x, y) + + +class SEBasicBlock(BasicBlock): + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + reduction=16, + D=-1, + ): + super().__init__( + inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=conv_type, + D=D, + ) + self.se = SELayer(planes, reduction=reduction, D=D) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBasicBlockSN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_SWITCH_NORM + + +class SEBasicBlockIN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_INSTANCE_NORM + + +class SEBasicBlockLN(SEBasicBlock): + NORM_TYPE = NormType.SPARSE_LAYER_NORM + + +class SEBottleneck(Bottleneck): + def __init__( + self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + conv_type=ConvType.HYPERCUBE, + D=3, + reduction=16, + ): + super().__init__( + inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=conv_type, + D=D, + ) + self.se = SELayer(planes * self.expansion, reduction=reduction, D=D) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBottleneckSN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_SWITCH_NORM + + +class SEBottleneckIN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_INSTANCE_NORM + + +class SEBottleneckLN(SEBottleneck): + NORM_TYPE = NormType.SPARSE_LAYER_NORM diff --git a/models/Mask3D/mask3d/models/position_embedding.py b/models/Mask3D/mask3d/models/position_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..70275f1610e1d3f5ec8d11d18d298b7877204b86 --- /dev/null +++ b/models/Mask3D/mask3d/models/position_embedding.py @@ -0,0 +1,179 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Various positional encodings for the transformer. 
+""" +import math +import torch +from torch import nn +import numpy as np + +# from utils.pc_util import shift_scale_points + + +def shift_scale_points(pred_xyz, src_range, dst_range=None): + """ + pred_xyz: B x N x 3 + src_range: [[B x 3], [B x 3]] - min and max XYZ coords + dst_range: [[B x 3], [B x 3]] - min and max XYZ coords + """ + if dst_range is None: + dst_range = [ + torch.zeros( + (src_range[0].shape[0], 3), device=src_range[0].device + ), + torch.ones((src_range[0].shape[0], 3), device=src_range[0].device), + ] + + if pred_xyz.ndim == 4: + src_range = [x[:, None] for x in src_range] + dst_range = [x[:, None] for x in dst_range] + + assert src_range[0].shape[0] == pred_xyz.shape[0] + assert dst_range[0].shape[0] == pred_xyz.shape[0] + assert src_range[0].shape[-1] == pred_xyz.shape[-1] + assert src_range[0].shape == src_range[1].shape + assert dst_range[0].shape == dst_range[1].shape + assert src_range[0].shape == dst_range[1].shape + + src_diff = src_range[1][:, None, :] - src_range[0][:, None, :] + dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :] + prop_xyz = ( + ((pred_xyz - src_range[0][:, None, :]) * dst_diff) / src_diff + ) + dst_range[0][:, None, :] + return prop_xyz + + +class PositionEmbeddingCoordsSine(nn.Module): + def __init__( + self, + temperature=10000, + normalize=False, + scale=None, + pos_type="fourier", + d_pos=None, + d_in=3, + gauss_scale=1.0, + ): + super().__init__() + self.d_pos = d_pos + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + assert pos_type in ["sine", "fourier"] + self.pos_type = pos_type + self.scale = scale + if pos_type == "fourier": + assert d_pos is not None + assert d_pos % 2 == 0 + # define a gaussian matrix input_ch -> output_ch + B = torch.empty((d_in, d_pos // 2)).normal_() + B *= gauss_scale + self.register_buffer("gauss_B", B) + self.d_pos = d_pos + + def get_sine_embeddings(self, xyz, num_channels, input_range): + num_channels = self.d_pos + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + ndim = num_channels // xyz.shape[2] + if ndim % 2 != 0: + ndim -= 1 + # automatically handle remainder by assiging it to the first dim + rems = num_channels - (ndim * xyz.shape[2]) + + assert ( + ndim % 2 == 0 + ), f"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}" + + final_embeds = [] + prev_dim = 0 + + for d in range(xyz.shape[2]): + cdim = ndim + if rems > 0: + # add remainder in increments of two to maintain even size + cdim += 2 + rems -= 2 + + if cdim != prev_dim: + dim_t = torch.arange( + cdim, dtype=torch.float32, device=xyz.device + ) + dim_t = self.temperature ** (2 * (dim_t // 2) / cdim) + + # create batch x cdim x nccords embedding + raw_pos = xyz[:, :, d] + if self.scale: + raw_pos *= self.scale + pos = raw_pos[:, :, None] / dim_t + pos = torch.stack( + (pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3 + ).flatten(2) + final_embeds.append(pos) + prev_dim = cdim + + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None): + # Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html + + if num_channels is None: + 
num_channels = self.gauss_B.shape[1] * 2 + + bsize, npoints = xyz.shape[0], xyz.shape[1] + assert num_channels > 0 and num_channels % 2 == 0 + d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1] + d_out = num_channels // 2 + assert d_out <= max_d_out + assert d_in == xyz.shape[-1] + + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + xyz *= 2 * np.pi + xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view( + bsize, npoints, d_out + ) + final_embeds = [xyz_proj.sin(), xyz_proj.cos()] + + # return batch x d_pos x npoints embedding + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def forward(self, xyz, num_channels=None, input_range=None): + assert isinstance(xyz, torch.Tensor) + assert xyz.ndim == 3 + # xyz is batch x npoints x 3 + if self.pos_type == "sine": + with torch.no_grad(): + out = self.get_sine_embeddings(xyz, num_channels, input_range) + elif self.pos_type == "fourier": + with torch.no_grad(): + out = self.get_fourier_embeddings( + xyz, num_channels, input_range + ) + else: + raise ValueError(f"Unknown {self.pos_type}") + + return out + + def extra_repr(self): + st = f"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}" + if hasattr(self, "gauss_B"): + st += f", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}" + return st diff --git a/models/Mask3D/mask3d/models/res16unet.py b/models/Mask3D/mask3d/models/res16unet.py new file mode 100644 index 0000000000000000000000000000000000000000..db771a6f12341b70d9e27e8f61efc2878b5d12c3 --- /dev/null +++ b/models/Mask3D/mask3d/models/res16unet.py @@ -0,0 +1,444 @@ +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.resnet import ResNetBase, get_norm +from mask3d.models.modules.common import ConvType, NormType, conv, conv_tr +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class Res16UNetBase(ResNetBase): + BLOCK = None + PLANES = (32, 64, 128, 256, 256, 256, 256, 256) + DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) + INIT_DIM = 32 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. 
+ # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__( + self, in_channels, out_channels, config, D=3, out_fpn=False, **kwargs + ): + super().__init__(in_channels, out_channels, config, D) + self.out_fpn = out_fpn + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv0p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn0 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + + self.conv1p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn1 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p8s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr4p16s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr5p8s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, 
bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr6p4s2 = conv_tr( + self.inplanes, + self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion + self.block7 = self._make_layer( + self.BLOCK, + self.PLANES[6], + self.LAYERS[6], + dilation=dilations[6], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr7p2s2 = conv_tr( + self.inplanes, + self.PLANES[7], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr7 = get_norm( + self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[7] + self.INIT_DIM + self.block8 = self._make_layer( + self.BLOCK, + self.PLANES[7], + self.LAYERS[7], + dilation=dilations[7], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.final = conv( + self.PLANES[7], + out_channels, + kernel_size=1, + stride=1, + bias=True, + D=D, + ) + self.relu = MinkowskiReLU(inplace=True) + + def forward(self, x): + feature_maps = [] + + out = self.conv0p1s1(x) + out = self.bn0(out) + out_p1 = self.relu(out) + + out = self.conv1p1s2(out_p1) + out = self.bn1(out) + out = self.relu(out) + out_b1p2 = self.block1(out) + + out = self.conv2p2s2(out_b1p2) + out = self.bn2(out) + out = self.relu(out) + out_b2p4 = self.block2(out) + + out = self.conv3p4s2(out_b2p4) + out = self.bn3(out) + out = self.relu(out) + out_b3p8 = self.block3(out) + + # pixel_dist=16 + out = self.conv4p8s2(out_b3p8) + out = self.bn4(out) + out = self.relu(out) + out = self.block4(out) + + feature_maps.append(out) + + # pixel_dist=8 + out = self.convtr4p16s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p8) + out = self.block5(out) + + feature_maps.append(out) + + # pixel_dist=4 + out = self.convtr5p8s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p4) + out = self.block6(out) + + feature_maps.append(out) + + # pixel_dist=2 + out = self.convtr6p4s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p2) + out = self.block7(out) + + feature_maps.append(out) + + # pixel_dist=1 + out = self.convtr7p2s2(out) + out = self.bntr7(out) + out = self.relu(out) + + out = me.cat(out, out_p1) + out = self.block8(out) + + feature_maps.append(out) + + if not self.out_fpn: + return out + else: + return out, feature_maps + + +class Res16UNet14(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1, 1, 1, 1, 1) + + +class Res16UNet18(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) + + +class Res16UNet34(Res16UNetBase): + BLOCK = BasicBlock + LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) + + +class Res16UNet50(Res16UNetBase): + BLOCK = Bottleneck + LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) + + +class Res16UNet101(Res16UNetBase): + BLOCK = Bottleneck + LAYERS = (2, 3, 4, 23, 2, 2, 2, 2) + + +class Res16UNet14A(Res16UNet14): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class Res16UNet14A2(Res16UNet14A): + LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) + + 
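+# The remaining variants below only override PLANES (the feature widths of the
+# eight encoder/decoder stages) and LAYERS (the number of residual blocks per
+# stage) of Res16UNetBase; the ST* classes reuse the same architectures with
+# D=4 for spatio-temporal (4D) sparse tensors.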
+class Res16UNet14B(Res16UNet14): + PLANES = (32, 64, 128, 256, 128, 128, 128, 128) + + +class Res16UNet14B2(Res16UNet14B): + LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) + + +class Res16UNet14B3(Res16UNet14B): + LAYERS = (2, 2, 2, 2, 1, 1, 1, 1) + + +class Res16UNet14C(Res16UNet14): + PLANES = (32, 64, 128, 256, 192, 192, 128, 128) + + +class Res16UNet14D(Res16UNet14): + PLANES = (32, 64, 128, 256, 384, 384, 384, 384) + + +class Res16UNet18A(Res16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class Res16UNet18B(Res16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 128, 128) + + +class Res16UNet18D(Res16UNet18): + PLANES = (32, 64, 128, 256, 384, 384, 384, 384) + + +class Res16UNet34A(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 64, 64) + + +class Res16UNet34B(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 64, 32) + + +class Res16UNet34C(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 96, 96) + + +class Custom30M(Res16UNet34): + PLANES = (32, 64, 128, 256, 128, 64, 64, 32) + + +class Res16UNet34D(Res16UNet34): + PLANES = (32, 64, 128, 256, 256, 128, 96, 128) + + +class STRes16UNetBase(Res16UNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STRes16UNet14(STRes16UNetBase, Res16UNet14): + pass + + +class STRes16UNet14A(STRes16UNetBase, Res16UNet14A): + pass + + +class STRes16UNet18(STRes16UNetBase, Res16UNet18): + pass + + +class STRes16UNet34(STRes16UNetBase, Res16UNet34): + pass + + +class STRes16UNet50(STRes16UNetBase, Res16UNet50): + pass + + +class STRes16UNet101(STRes16UNetBase, Res16UNet101): + pass + + +class STRes16UNet18A(STRes16UNet18): + PLANES = (32, 64, 128, 256, 128, 128, 96, 96) + + +class STResTesseract16UNetBase(STRes16UNetBase): + pass + # CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase): + pass diff --git a/models/Mask3D/mask3d/models/resnet.py b/models/Mask3D/mask3d/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ad622893d191fce0cf9db6edafbc83f684d218 --- /dev/null +++ b/models/Mask3D/mask3d/models/resnet.py @@ -0,0 +1,243 @@ +import torch.nn as nn +import MinkowskiEngine as ME + +from mask3d.models.model import Model +from mask3d.models.modules.common import ConvType, NormType, conv, get_norm, sum_pool +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck + + +class ResNetBase(Model): + BLOCK = None + LAYERS = () + INIT_DIM = 64 + PLANES = (64, 128, 256, 512) + OUT_PIXEL_DIST = 32 + HAS_LAST_BLOCK = False + CONV_TYPE = ConvType.HYPERCUBE + + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + assert self.BLOCK is not None + assert self.OUT_PIXEL_DIST > 0 + + super().__init__(in_channels, out_channels, config, D, **kwargs) + + self.network_initialization(in_channels, out_channels, config, D) + self.weight_initialization() + + def network_initialization(self, in_channels, out_channels, config, D): + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + dilations = config.dilations + bn_momentum = config.bn_momentum + self.inplanes = self.INIT_DIM + self.conv1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + D=D, + ) + + self.bn1 = get_norm( + NormType.BATCH_NORM, + self.inplanes, + 
D=self.D, + bn_momentum=bn_momentum, + ) + self.relu = ME.MinkowskiReLU(inplace=True) + self.pool = sum_pool( + kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D + ) + + self.layer1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[0], 1), + ) + self.layer2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[1], 1), + ) + self.layer3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[2], 1), + ) + self.layer4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[3], 1), + ) + + self.final = conv( + self.PLANES[3] * self.BLOCK.expansion, + out_channels, + kernel_size=1, + bias=True, + D=D, + ) + + def weight_initialization(self): + for m in self.modules(): + if isinstance(m, ME.MinkowskiBatchNorm): + nn.init.constant_(m.bn.weight, 1) + nn.init.constant_(m.bn.bias, 0) + + def _make_layer( + self, + block, + planes, + blocks, + stride=1, + dilation=1, + norm_type=NormType.BATCH_NORM, + bn_momentum=0.1, + ): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + D=self.D, + ), + get_norm( + norm_type, + planes * block.expansion, + D=self.D, + bn_momentum=bn_momentum, + ), + ) + layers = [] + layers.append( + block( + self.inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + self.inplanes, + planes, + stride=1, + dilation=dilation, + conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.pool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.final(x) + return x + + +class ResNet14(ResNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1) + + +class ResNet18(ResNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2) + + +class ResNet34(ResNetBase): + BLOCK = BasicBlock + LAYERS = (3, 4, 6, 3) + + +class ResNet50(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 6, 3) + + +class ResNet101(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 23, 3) + + +class STResNetBase(ResNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STResNet14(STResNetBase, ResNet14): + pass + + +class STResNet18(STResNetBase, ResNet18): + pass + + +class STResNet34(STResNetBase, ResNet34): + pass + + +class STResNet50(STResNetBase, ResNet50): + pass + + +class STResNet101(STResNetBase, ResNet101): + pass + + +class STResTesseractNetBase(STResNetBase): + CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseractNet14(STResTesseractNetBase, STResNet14): + pass + + +class STResTesseractNet18(STResTesseractNetBase, STResNet18): + pass + + +class STResTesseractNet34(STResTesseractNetBase, STResNet34): + pass + + +class STResTesseractNet50(STResTesseractNetBase, STResNet50): + pass + + +class 
STResTesseractNet101(STResTesseractNetBase, STResNet101): + pass diff --git a/models/Mask3D/mask3d/models/resnet.py.tmp b/models/Mask3D/mask3d/models/resnet.py.tmp new file mode 100644 index 0000000000000000000000000000000000000000..5208c1f576bdd81528b305a27dc9302b867d853f --- /dev/null +++ b/models/Mask3D/mask3d/models/resnet.py.tmp @@ -0,0 +1,240 @@ +import torch.nn as nn +import MinkowskiEngine as ME + +from models.model import Model +from models.modules.common import ConvType, NormType, conv, get_norm, sum_pool +from models.modules.resnet_block import BasicBlock, Bottleneck + + +class ResNetBase(Model): + BLOCK = None + LAYERS = () + INIT_DIM = 64 + PLANES = (64, 128, 256, 512) + OUT_PIXEL_DIST = 32 + HAS_LAST_BLOCK = False + CONV_TYPE = ConvType.HYPERCUBE + + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + assert self.BLOCK is not None + assert self.OUT_PIXEL_DIST > 0 + + super().__init__(in_channels, out_channels, config, D, **kwargs) + + self.network_initialization(in_channels, out_channels, config, D) + self.weight_initialization() + + def network_initialization(self, in_channels, out_channels, config, D): + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + dilations = config.dilations + bn_momentum = config.bn_momentum + self.inplanes = self.INIT_DIM + self.conv1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + D=D, + ) + + self.bn1 = get_norm( + NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum + ) + self.relu = ME.MinkowskiReLU(inplace=True) + self.pool = sum_pool( + kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D + ) + + self.layer1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[0], 1), + ) + self.layer2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[1], 1), + ) + self.layer3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[2], 1), + ) + self.layer4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + stride=space_n_time_m(2, 1), + dilation=space_n_time_m(dilations[3], 1), + ) + + self.final = conv( + self.PLANES[3] * self.BLOCK.expansion, + out_channels, + kernel_size=1, + bias=True, + D=D, + ) + + def weight_initialization(self): + for m in self.modules(): + if isinstance(m, ME.MinkowskiBatchNorm): + nn.init.constant_(m.bn.weight, 1) + nn.init.constant_(m.bn.bias, 0) + + def _make_layer( + self, + block, + planes, + blocks, + stride=1, + dilation=1, + norm_type=NormType.BATCH_NORM, + bn_momentum=0.1, + ): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + D=self.D, + ), + get_norm( + norm_type, + planes * block.expansion, + D=self.D, + bn_momentum=bn_momentum, + ), + ) + layers = [] + layers.append( + block( + self.inplanes, + planes, + stride=stride, + dilation=dilation, + downsample=downsample, + conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + self.inplanes, + planes, + stride=1, + dilation=dilation, + 
conv_type=self.CONV_TYPE, + D=self.D, + ) + ) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.pool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.final(x) + return x + + +class ResNet14(ResNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1) + + +class ResNet18(ResNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2) + + +class ResNet34(ResNetBase): + BLOCK = BasicBlock + LAYERS = (3, 4, 6, 3) + + +class ResNet50(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 6, 3) + + +class ResNet101(ResNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 23, 3) + + +class STResNetBase(ResNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STResNet14(STResNetBase, ResNet14): + pass + + +class STResNet18(STResNetBase, ResNet18): + pass + + +class STResNet34(STResNetBase, ResNet34): + pass + + +class STResNet50(STResNetBase, ResNet50): + pass + + +class STResNet101(STResNetBase, ResNet101): + pass + + +class STResTesseractNetBase(STResNetBase): + CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseractNet14(STResTesseractNetBase, STResNet14): + pass + + +class STResTesseractNet18(STResTesseractNetBase, STResNet18): + pass + + +class STResTesseractNet34(STResTesseractNetBase, STResNet34): + pass + + +class STResTesseractNet50(STResTesseractNetBase, STResNet50): + pass + + +class STResTesseractNet101(STResTesseractNetBase, STResNet101): + pass diff --git a/models/Mask3D/mask3d/models/resunet.py b/models/Mask3D/mask3d/models/resunet.py new file mode 100644 index 0000000000000000000000000000000000000000..98a3adc56f09d534256960c080594e5df3a41c7c --- /dev/null +++ b/models/Mask3D/mask3d/models/resunet.py @@ -0,0 +1,617 @@ +import torch.nn as nn +import MinkowskiEngine as ME +import MinkowskiEngine.MinkowskiOps as me +from MinkowskiEngine import MinkowskiReLU + +from mask3d.models.resnet import ResNetBase, get_norm +from mask3d.models.modules.common import ConvType, NormType, conv, conv_tr +from mask3d.models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockINBN + + +class MinkUNetBase(ResNetBase): + BLOCK = None + PLANES = (64, 128, 256, 512, 256, 128, 128) + DILATIONS = (1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2) + INIT_DIM = 64 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. 
+ # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + super().__init__(in_channels, out_channels, config, D) + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv1p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn1 = get_norm( + self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr4p8s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr5p4s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.convtr6p2s2 = conv_tr( + self.inplanes, + 
self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + self.relu = MinkowskiReLU(inplace=True) + + self.final = nn.Sequential( + conv( + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, + 512, + kernel_size=1, + stride=1, + dilation=1, + bias=False, + D=D, + ), + ME.MinkowskiBatchNorm(512), + ME.MinkowskiReLU(), + conv( + 512, + out_channels, + kernel_size=1, + stride=1, + dilation=1, + bias=True, + D=D, + ), + ) + + def forward(self, x): + out = self.conv1p1s1(x) + out = self.bn1(out) + out = self.relu(out) + + out_b1p1 = self.block1(out) + + out = self.conv2p1s2(out_b1p1) + out = self.bn2(out) + out = self.relu(out) + + out_b2p2 = self.block2(out) + + out = self.conv3p2s2(out_b2p2) + out = self.bn3(out) + out = self.relu(out) + + out_b3p4 = self.block3(out) + + out = self.conv4p4s2(out_b3p4) + out = self.bn4(out) + out = self.relu(out) + + # pixel_dist=8 + out = self.block4(out) + + out = self.convtr4p8s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p4) + out = self.block5(out) + + out = self.convtr5p4s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p2) + out = self.block6(out) + + out = self.convtr6p2s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p1) + return self.final(out) + + +class ResUNet14(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (1, 1, 1, 1, 1, 1) + + +class ResUNet18(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (2, 2, 2, 2, 2, 2) + + +class ResUNet18INBN(ResUNet18): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + BLOCK = BasicBlockINBN + + +class ResUNet34(MinkUNetBase): + BLOCK = BasicBlock + LAYERS = (3, 4, 6, 3, 2, 2) + + +class ResUNet50(MinkUNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 6, 3, 2, 2) + + +class ResUNet101(MinkUNetBase): + BLOCK = Bottleneck + LAYERS = (3, 4, 23, 3, 2, 2) + + +class ResUNet14D(ResUNet14): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet18D(ResUNet18): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet34D(ResUNet34): + PLANES = (64, 128, 256, 512, 512, 512, 512) + + +class ResUNet34E(ResUNet34): + INIT_DIM = 32 + PLANES = (32, 64, 128, 256, 128, 64, 64) + + +class ResUNet34F(ResUNet34): + INIT_DIM = 32 + PLANES = (32, 64, 128, 256, 128, 64, 32) + + +class MinkUNetHyper(MinkUNetBase): + BLOCK = None + PLANES = (64, 128, 256, 512, 256, 128, 128) + DILATIONS = (1, 1, 1, 1, 1, 1) + LAYERS = (2, 2, 2, 2, 2, 2) + INIT_DIM = 64 + OUT_PIXEL_DIST = 1 + NORM_TYPE = NormType.BATCH_NORM + NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + # To use the model, must call initialize_coords before forward pass. 
+ # Once data is processed, call clear to reset the model before calling initialize_coords + def __init__(self, in_channels, out_channels, config, D=3, **kwargs): + super(MinkUNetBase, self).__init__( + in_channels, out_channels, config, D + ) + + def network_initialization(self, in_channels, out_channels, config, D): + # Setup net_metadata + dilations = self.DILATIONS + bn_momentum = config.bn_momentum + + def space_n_time_m(n, m): + return n if D == 3 else [n, n, n, m] + + if D == 4: + self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) + + # Output of the first conv concated to conv6 + self.inplanes = self.INIT_DIM + self.conv1p1s1 = conv( + in_channels, + self.inplanes, + kernel_size=space_n_time_m(config.conv1_kernel_size, 1), + stride=1, + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + + self.bn1 = get_norm( + self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum + ) + self.block1 = self._make_layer( + self.BLOCK, + self.PLANES[0], + self.LAYERS[0], + dilation=dilations[0], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv2p1s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn2 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block2 = self._make_layer( + self.BLOCK, + self.PLANES[1], + self.LAYERS[1], + dilation=dilations[1], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv3p2s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn3 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block3 = self._make_layer( + self.BLOCK, + self.PLANES[2], + self.LAYERS[2], + dilation=dilations[2], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + + self.conv4p4s2 = conv( + self.inplanes, + self.inplanes, + kernel_size=space_n_time_m(2, 1), + stride=space_n_time_m(2, 1), + dilation=1, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bn4 = get_norm( + self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum + ) + self.block4 = self._make_layer( + self.BLOCK, + self.PLANES[3], + self.LAYERS[3], + dilation=dilations[3], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr4 = ME.MinkowskiPoolingTranspose( + kernel_size=8, stride=8, dimension=D + ) + _ = self.inplanes + self.convtr4p8s2 = conv_tr( + self.inplanes, + self.PLANES[4], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr4 = get_norm( + self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion + self.block5 = self._make_layer( + self.BLOCK, + self.PLANES[4], + self.LAYERS[4], + dilation=dilations[4], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr5 = ME.MinkowskiPoolingTranspose( + kernel_size=4, stride=4, dimension=D + ) + out_pool5 = self.inplanes + self.convtr5p4s2 = conv_tr( + self.inplanes, + self.PLANES[5], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr5 = get_norm( + self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum + ) + + self.inplanes = self.PLANES[5] + 
self.PLANES[1] * self.BLOCK.expansion + self.block6 = self._make_layer( + self.BLOCK, + self.PLANES[5], + self.LAYERS[5], + dilation=dilations[5], + norm_type=self.NORM_TYPE, + bn_momentum=bn_momentum, + ) + self.pool_tr6 = ME.MinkowskiPoolingTranspose( + kernel_size=2, stride=2, dimension=D + ) + out_pool6 = self.inplanes + self.convtr6p2s2 = conv_tr( + self.inplanes, + self.PLANES[6], + kernel_size=space_n_time_m(2, 1), + upsample_stride=space_n_time_m(2, 1), + dilation=1, + bias=False, + conv_type=self.NON_BLOCK_CONV_TYPE, + D=D, + ) + self.bntr6 = get_norm( + self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum + ) + + self.relu = MinkowskiReLU(inplace=True) + + self.final = nn.Sequential( + conv( + out_pool5 + + out_pool6 + + self.PLANES[6] + + self.PLANES[0] * self.BLOCK.expansion, + 512, + kernel_size=1, + bias=False, + D=D, + ), + ME.MinkowskiBatchNorm(512), + ME.MinkowskiReLU(), + conv(512, out_channels, kernel_size=1, bias=True, D=D), + ) + + def forward(self, x): + out = self.conv1p1s1(x) + out = self.bn1(out) + out = self.relu(out) + + out_b1p1 = self.block1(out) + + out = self.conv2p1s2(out_b1p1) + out = self.bn2(out) + out = self.relu(out) + + out_b2p2 = self.block2(out) + + out = self.conv3p2s2(out_b2p2) + out = self.bn3(out) + out = self.relu(out) + + out_b3p4 = self.block3(out) + + out = self.conv4p4s2(out_b3p4) + out = self.bn4(out) + out = self.relu(out) + + # pixel_dist=8 + out = self.block4(out) + + out = self.convtr4p8s2(out) + out = self.bntr4(out) + out = self.relu(out) + + out = me.cat(out, out_b3p4) + out = self.block5(out) + out_5 = self.pool_tr5(out) + + out = self.convtr5p4s2(out) + out = self.bntr5(out) + out = self.relu(out) + + out = me.cat(out, out_b2p2) + out = self.block6(out) + out_6 = self.pool_tr6(out) + + out = self.convtr6p2s2(out) + out = self.bntr6(out) + out = self.relu(out) + + out = me.cat(out, out_b1p1, out_6, out_5) + return self.final(out) + + +class MinkUNetHyper14INBN(MinkUNetHyper): + NORM_TYPE = NormType.INSTANCE_BATCH_NORM + BLOCK = BasicBlockINBN + + +class STMinkUNetBase(MinkUNetBase): + + CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS + + def __init__(self, in_channels, out_channels, config, D=4, **kwargs): + super().__init__(in_channels, out_channels, config, D, **kwargs) + + +class STResUNet14(STMinkUNetBase, ResUNet14): + pass + + +class STResUNet18(STMinkUNetBase, ResUNet18): + pass + + +class STResUNet34(STMinkUNetBase, ResUNet34): + pass + + +class STResUNet50(STMinkUNetBase, ResUNet50): + pass + + +class STResUNet101(STMinkUNetBase, ResUNet101): + pass + + +class STResTesseractUNetBase(STMinkUNetBase): + CONV_TYPE = ConvType.HYPERCUBE + + +class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14): + pass + + +class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18): + pass + + +class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34): + pass + + +class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50): + pass + + +class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101): + pass diff --git a/models/Mask3D/mask3d/models/wrapper.py b/models/Mask3D/mask3d/models/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..a6bf1678d2106049b8e6a2ac2f3a9aff37dcfc9c --- /dev/null +++ b/models/Mask3D/mask3d/models/wrapper.py @@ -0,0 +1,32 @@ +import random + +from torch.nn import Module +from MinkowskiEngine import SparseTensor + + +class Wrapper(Module): + """ + Wrapper for the segmentation networks. 
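+    During training the filter is applied to roughly half of the forward passes (see
+    `forward`), so the wrapped model learns to be invariant to it; at evaluation time
+    the filter is always applied after the model.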
+ """ + + OUT_PIXEL_DIST = -1 + + def __init__(self, NetClass, in_nchannel, out_nchannel, config): + super().__init__() + self.initialize_filter(NetClass, in_nchannel, out_nchannel, config) + + def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config): + raise NotImplementedError("Must initialize a model and a filter") + + def forward(self, x, coords, colors=None): + soutput = self.model(x) + + # During training, make the network invariant to the filter + if not self.training or random.random() < 0.5: + # Filter requires the model to finish the forward pass + wrapper_coords = self.filter.initialize_coords( + self.model, coords, colors + ) + finput = SparseTensor(soutput.F, wrapper_coords) + soutput = self.filter(finput) + return soutput diff --git a/models/Mask3D/mask3d/predict.py b/models/Mask3D/mask3d/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..4c085fd01897c13540da8eac9f941dcf0847ca6f --- /dev/null +++ b/models/Mask3D/mask3d/predict.py @@ -0,0 +1,187 @@ +import hydra +from omegaconf import DictConfig, OmegaConf +from models.mask3d import Mask3D +import os +import torch + +import MinkowskiEngine as ME +import open3d as o3d +import numpy as np +import albumentations as A + +from utils.utils import ( + flatten_dict, + load_baseline_model, + load_checkpoint_with_missing_or_exsessive_keys, + load_backbone_checkpoint_with_missing_or_exsessive_keys, +) + +from datasets.scannet200.scannet200_constants import ( + SCANNET_COLOR_MAP_200, + SCANNET_COLOR_MAP_20, + VALID_CLASS_IDS_200, + VALID_CLASS_IDS_20, + CLASS_LABELS_200, + CLASS_LABELS_20, +) + +root_dir = '/home/weders/scratch/scratch/scannetter/arkit/raw/Validation' + +class InstanceSegmentation(torch.nn.Module): + def __init__(self, cfg): + super().__init__() + self.model = hydra.utils.instantiate(cfg.model) + + + def forward(self, x, raw_coordinates=None): + return self.model(x, raw_coordinates=raw_coordinates) + +@hydra.main( + config_path="conf", config_name="config_base_instance_segmentation.yaml" +) +def main(cfg: DictConfig): + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.chdir(hydra.utils.get_original_cwd()) + model = InstanceSegmentation(cfg) + + if cfg.general.backbone_checkpoint is not None: + cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys( + cfg, model + ) + if cfg.general.checkpoint is not None: + cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model) + + model = model.to(device) + # model.eval() + + color_mean = (0.47793125906962, 0.4303257521323044, 0.3749598901421883) + color_std = (0.2834475483823543, 0.27566157565723015, 0.27018971370874995) + normalize_color = A.Normalize(mean=color_mean, std=color_std) + + # iterate over data + for sc in os.listdir(root_dir): + + + if not os.path.exists(os.path.join(root_dir, sc, 'mesh_tsdf.ply')): + continue + + # save outputs + output_dir = os.path.join(root_dir, sc, 'pred_mask3d_ours') + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + if sc != '42445991': + continue + + # if os.path.exists(os.path.join(output_dir, 'mask3d_predictions.txt')): + # print('Skipping', sc) + # continue + + print('Processing', sc) + + mesh = o3d.io.read_triangle_mesh(os.path.join(root_dir, sc, 'mesh_tsdf.ply')) + mesh.compute_vertex_normals() + + points = np.asarray(mesh.vertices) + colors = np.asarray(mesh.vertex_colors) + + + colors = colors * 255. 
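+        # A.Normalize operates on image-shaped arrays (0-255 values by default), so the
+        # per-vertex colors are wrapped into a 1xNx3 "pseudo image", normalized, and then
+        # squeezed back to an Nx3 float array.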
+ pseudo_image = colors.astype(np.uint8)[np.newaxis, :, :] + colors = np.squeeze(normalize_color(image=pseudo_image)["image"]) + + # voxelize data + coords = np.floor(points / 0.02) + + # maybe this change (_, _, ...) is not necessary and we can directly get out + # the sample coordinates? + _, _, unique_map, inverse_map = ME.utils.sparse_quantize(coordinates=coords, features=colors, return_index=True, return_inverse=True) + + sample_coordinates = coords[unique_map] + coordinates = [torch.from_numpy(sample_coordinates).int()] + sample_features = colors[unique_map] + features = [torch.from_numpy(sample_features).float()] + + coordinates, _ = ME.utils.sparse_collate(coords=coordinates, feats=features) + features = torch.cat(features, dim=0) + data = ME.SparseTensor( + coordinates=coordinates, + features=features, + device=device, + ) + + # run model + with torch.no_grad(): + outputs = model(data, raw_coordinates=features) + + del data + torch.cuda.empty_cache() + + # parse predictions + logits = outputs["pred_logits"] + masks = outputs["pred_masks"] + + + # reformat predictions + logits = logits[0].detach().cpu() + masks = masks[0].detach().cpu() + + labels = [] + confidences = [] + masks_binary = [] + + for i in range(len(logits)): + p_labels = torch.softmax(logits[i], dim=-1) + p_masks = torch.sigmoid(masks[:, i]) + l = torch.argmax(p_labels, dim=-1) + c_label = torch.max(p_labels) + m = p_masks > 0.5 + c_m = p_masks[m].sum() / (m.sum() + 1e-8) + c = c_label * c_m + if l < 200 and c > 0.5: + labels.append(l.item()) + confidences.append(c.item()) + masks_binary.append(m[inverse_map]) # mapping the mask back to the original point cloud + + + # save labelled mesh + mesh_labelled = o3d.geometry.TriangleMesh() + mesh_labelled.vertices = mesh.vertices + mesh_labelled.triangles = mesh.triangles + + labels_mapped = np.zeros((len(mesh.vertices), 1)) + colors_mapped = np.zeros((len(mesh.vertices), 3)) + + confidences, labels, masks_binary = zip(*sorted(zip(confidences, labels, masks_binary), reverse=False)) + for i, (l, c, m) in enumerate(zip(labels, confidences, masks_binary)): + labels_mapped[m == 1] = l + if l == 0: + l_ = -1 + 2 # label offset is 2 for scannet 200, 0 needs to be mapped to -1 before (see trainer.py in Mask3D) + else: + l_ = l + 2 + # print(VALID_CLASS_IDS_200[l_], SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l_]], l_, CLASS_LABELS_200[l_]) + colors_mapped[m == 1] = SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l_]] + + # colors_mapped[mask_mapped == 1] = SCANNET_COLOR_MAP_200[VALID_CLASS_IDS_200[l]] + + + + + mesh_labelled.vertex_colors = o3d.utility.Vector3dVector(colors_mapped.astype(np.float32) / 255.) 
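+        # Open3D expects vertex colors in [0, 1]; the labelled mesh is written next to the
+        # scene so predictions can be inspected in a viewer such as MeshLab.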
+ o3d.io.write_triangle_mesh(f'{output_dir}/mesh_tsdf_labelled.ply', mesh_labelled) + + mask_path = os.path.join(output_dir, 'pred_mask') + if not os.path.exists(mask_path): + os.makedirs(mask_path) + + # sorting by confidence + with open(os.path.join(output_dir, 'mask3d_predictions.txt'), 'w') as f: + for i, (l, c, m) in enumerate(zip(labels, confidences, masks_binary)): + mask_file = f'pred_mask/{str(i).zfill(3)}.txt' + f.write(f'{mask_file} {VALID_CLASS_IDS_200[l]} {c}\n') + np.savetxt(os.path.join(output_dir, mask_file), m.numpy(), fmt='%d') + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/Mask3D/mask3d/preprocess_arkitscenes.py b/models/Mask3D/mask3d/preprocess_arkitscenes.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/scripts/arkitscenes/test.sh b/models/Mask3D/mask3d/scripts/arkitscenes/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..64cee20547d22a6502ade31c199f342121c59c4b --- /dev/null +++ b/models/Mask3D/mask3d/scripts/arkitscenes/test.sh @@ -0,0 +1,23 @@ +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_DBSCAN=0.95 +CURR_TOPK=750 +CURR_QUERY=150 + +python predict.py \ +general.experiment_name="arkitscenes" \ +general.project_name="arktiscenes" \ +general.checkpoint="checkpoints/scannet200/scannet200_benchmark.ckpt" \ +data/datasets=scannet200 \ +general.num_targets=201 \ +data.num_labels=200 \ +general.eval_on_segments=false \ +general.train_on_segments=false \ +general.train_mode=false \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} \ +general.export=true \ +data.test_mode=test \ +general.export_threshold=${CURR_T} \ No newline at end of file diff --git a/models/Mask3D/mask3d/scripts/s3dis/s3dis_from_scratch.sh b/models/Mask3D/mask3d/scripts/s3dis/s3dis_from_scratch.sh new file mode 100644 index 0000000000000000000000000000000000000000..373e067d050bd30a904fa955d3ea26f9414c0f2a --- /dev/null +++ b/models/Mask3D/mask3d/scripts/s3dis/s3dis_from_scratch.sh @@ -0,0 +1,33 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_AREA=1 # set the area number accordingly [1,6] +CURR_DBSCAN=0.6 +CURR_TOPK=-1 +CURR_QUERY=100 + +python main_instance_segmentation.py \ + general.project_name="s3dis" \ + general.experiment_name="area${CURR_AREA}_from_scratch" \ + data.batch_size=4 \ + data/datasets=s3dis \ + general.num_targets=14 \ + data.num_labels=13 \ + trainer.max_epochs=1001 \ + general.area=${CURR_AREA} \ + trainer.check_val_every_n_epoch=10 + +python main_instance_segmentation.py \ +general.project_name="s3dis_eval" \ +general.experiment_name="area${CURR_AREA}_from_scratch_eps_${CURR_DBSCAN}_topk_${CURR_TOPK}_q_${CURR_QUERY}" \ +general.checkpoint="checkpoints/s3dis/from_scratch/area${CURR_AREA}.ckpt" \ +general.train_mode=false \ +data.batch_size=4 \ +data/datasets=s3dis \ +general.num_targets=14 \ +data.num_labels=13 \ +general.area=${CURR_AREA} \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/scripts/s3dis/s3dis_pretrained.sh b/models/Mask3D/mask3d/scripts/s3dis/s3dis_pretrained.sh new file mode 100644 index 0000000000000000000000000000000000000000..f5a1d08d8a4a17f9d6aa2f88c5043d23bd9b1fed --- /dev/null +++ b/models/Mask3D/mask3d/scripts/s3dis/s3dis_pretrained.sh @@ -0,0 
+1,34 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_AREA=1 # set the area number accordingly [1,6] +CURR_DBSCAN=0.6 +CURR_TOPK=-1 +CURR_QUERY=100 + +python main_instance_segmentation.py \ + general.project_name="s3dis" \ + general.experiment_name="area${CURR_AREA}_pretrained" \ + data.batch_size=4 \ + data/datasets=s3dis \ + general.num_targets=14 \ + data.num_labels=13 \ + general.area=${CURR_AREA} \ + general.checkpoint="checkpoints/s3dis/scannet_pretrained/scannet_pretrained.ckpt" \ + trainer.check_val_every_n_epoch=10 \ + optimizer.lr=0.00001 + +python main_instance_segmentation.py \ +general.project_name="s3dis_eval" \ +general.experiment_name="area${CURR_AREA}_pretrained_eps_${CURR_DBSCAN}_topk_${CURR_TOPK}_q_${CURR_QUERY}" \ +general.checkpoint="checkpoints/s3dis/scannet_pretrained/area${CURR_AREA}.ckpt" \ +general.train_mode=false \ +data.batch_size=4 \ +data/datasets=s3dis \ +general.num_targets=14 \ +data.num_labels=13 \ +general.area=${CURR_AREA} \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/scripts/scannet/scannet_benchmark.sh b/models/Mask3D/mask3d/scripts/scannet/scannet_benchmark.sh new file mode 100644 index 0000000000000000000000000000000000000000..d8a45ba9717a5488b3a387dc2f29028de6c1c5ae --- /dev/null +++ b/models/Mask3D/mask3d/scripts/scannet/scannet_benchmark.sh @@ -0,0 +1,28 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_DBSCAN=0.95 +CURR_TOPK=300 +CURR_QUERY=150 + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="benchmark" \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +data.train_mode=train_validation + +# TEST +python main_instance_segmentation.py \ +general.experiment_name="benchmark_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}" \ +general.project_name="scannet_eval" \ +general.checkpoint='checkpoints/scannet/scannet_benchmark.ckpt' \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +general.train_mode=false \ +general.export=true \ +data.test_mode=test \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/scripts/scannet/scannet_pretrain_for_s3dis.sh b/models/Mask3D/mask3d/scripts/scannet/scannet_pretrain_for_s3dis.sh new file mode 100644 index 0000000000000000000000000000000000000000..cfb1c1312257a7a4415c528d4935f160796e4ecf --- /dev/null +++ b/models/Mask3D/mask3d/scripts/scannet/scannet_pretrain_for_s3dis.sh @@ -0,0 +1,7 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="pretrain_for_s3dis" \ +data.train_mode=train_validation \ No newline at end of file diff --git a/models/Mask3D/mask3d/scripts/scannet/scannet_val.sh b/models/Mask3D/mask3d/scripts/scannet/scannet_val.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c82a26204f145f6eb20bd9fa2a1f632cdaea77d --- /dev/null +++ b/models/Mask3D/mask3d/scripts/scannet/scannet_val.sh @@ -0,0 +1,25 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_DBSCAN=0.95 +CURR_TOPK=500 +CURR_QUERY=150 + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="validation" \ +general.eval_on_segments=true \ +general.train_on_segments=true + +# TEST +python main_instance_segmentation.py \ 
+general.experiment_name="validation_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}" \ +general.project_name="scannet_eval" \ +general.checkpoint='checkpoints/scannet/scannet_val.ckpt' \ +general.train_mode=false \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/scripts/scannet200/scannet200_benchmark.sh b/models/Mask3D/mask3d/scripts/scannet200/scannet200_benchmark.sh new file mode 100644 index 0000000000000000000000000000000000000000..7177d4a6742d485f63e5b878aeb292babf3364d5 --- /dev/null +++ b/models/Mask3D/mask3d/scripts/scannet200/scannet200_benchmark.sh @@ -0,0 +1,37 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_DBSCAN=0.95 +CURR_TOPK=300 +CURR_QUERY=150 +CURR_T=0.001 + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="scannet200_benchmark" \ +general.project_name="scannet200" \ +data/datasets=scannet200 \ +general.num_targets=201 \ +data.num_labels=200 \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +data.train_mode=train_validation + +# TEST +python main_instance_segmentation.py \ +general.experiment_name="scannet200_benchmark_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}_export_${CURR_T}" \ +general.project_name="scannet200_eval" \ +general.checkpoint="checkpoints/scannet200/scannet200_benchmark.ckpt" \ +data/datasets=scannet200 \ +general.num_targets=201 \ +data.num_labels=200 \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +general.train_mode=false \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} \ +general.export=true \ +data.test_mode=test \ +general.export_threshold=${CURR_T} diff --git a/models/Mask3D/mask3d/scripts/scannet200/scannet200_val.sh b/models/Mask3D/mask3d/scripts/scannet200/scannet200_val.sh new file mode 100644 index 0000000000000000000000000000000000000000..80f030f575c6080e1f74316a6f126e66702e5b59 --- /dev/null +++ b/models/Mask3D/mask3d/scripts/scannet200/scannet200_val.sh @@ -0,0 +1,32 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 # speeds up MinkowskiEngine + +CURR_DBSCAN=0.95 +CURR_TOPK=750 +CURR_QUERY=150 + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="scannet200_val" \ +general.project_name="scannet200" \ +data/datasets=scannet200 \ +general.num_targets=201 \ +data.num_labels=200 \ +general.eval_on_segments=true \ +general.train_on_segments=true + +# TEST +python main_instance_segmentation.py \ +general.experiment_name="scannet200_val_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}" \ +general.project_name="scannet200_eval" \ +general.checkpoint="checkpoints/scannet200/scannet200_val.ckpt" \ +data/datasets=scannet200 \ +general.num_targets=201 \ +data.num_labels=200 \ +general.eval_on_segments=true \ +general.train_on_segments=true \ +general.train_mode=false \ +model.num_queries=${CURR_QUERY} \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/scripts/stpls3d/merge_exports.py b/models/Mask3D/mask3d/scripts/stpls3d/merge_exports.py new file mode 100644 index 0000000000000000000000000000000000000000..7a314a3b563d0f19cf1f0c6e0ce522d4df9c5bea --- /dev/null +++ b/models/Mask3D/mask3d/scripts/stpls3d/merge_exports.py @@ -0,0 +1,55 @@ +import os 
+import shutil +from glob import glob +from tqdm import tqdm + +base_path = "INSERT_WORKING_DIRECTORY" +vs03 = f"{base_path}/benchmark_03" +vs02 = f"{base_path}/benchmark_02" + +target_path = "INSERT_TARGET_DIRECTORY" + +print("COPY MASKS FILES 1/2 ...") +shutil.copytree(f"{vs02}/pred_mask", f"{target_path}/pred_mask_02") +print("COPY MASKS FILES 2/2 ...") +shutil.copytree(f"{vs03}/pred_mask", f"{target_path}/pred_mask_03") + +for scene03 in tqdm(glob(f"{vs03}/*.txt")): + instances = [] + with open(scene03, "r") as file03: + while line := file03.readline().rstrip(): + mask_path, class_id, score = line.split(" ") + + if int(class_id) in [1, 3, 4, 7, 8, 11, 12, 13]: + instances.append( + f'{mask_path.replace("pred_mask", "pred_mask_03")} {class_id} {score}' + ) + print(instances[-1]) + else: + print( + f'DELETE {target_path}/{mask_path.replace("pred_mask", "pred_mask_03")}' + ) + os.remove( + f'{target_path}/{mask_path.replace("pred_mask", "pred_mask_03")}' + ) + + with open(f'{vs02}/{scene03.split("/")[-1]}', "r") as file02: + while line := file02.readline().rstrip(): + mask_path, class_id, score = line.split(" ") + + if int(class_id) not in [1, 3, 4, 7, 8, 11, 12, 13]: + instances.append( + f'{mask_path.replace("pred_mask", "pred_mask_02")} {class_id} {score}' + ) + print(instances[-1]) + else: + print( + f'DELETE {target_path}/{mask_path.replace("pred_mask", "pred_mask_02")}' + ) + os.remove( + f'{target_path}/{mask_path.replace("pred_mask", "pred_mask_02")}' + ) + + with open(f'{target_path}/{scene03.split("/")[-1]}', "w") as fout: + for line in instances: + fout.write(f"{line}\n") diff --git a/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_benchmark.sh b/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_benchmark.sh new file mode 100644 index 0000000000000000000000000000000000000000..72443361774e05dc7a85c72754643a934b5891be --- /dev/null +++ b/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_benchmark.sh @@ -0,0 +1,99 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 + +CURR_DBSCAN=12.5 +CURR_TOPK=200 +CURR_QUERY=160 +CURR_SIZE=54 +CURR_THRESHOLD=0.01 + +# TRAIN network 1 with voxel size 0.333 +python main_instance_segmentation.py \ +general.experiment_name="benchmark_03" \ +general.project_name="stpls3d" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.333 \ +data.num_workers=10 \ +data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +model.config.backbone._target_=models.Res16UNet18B \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 \ +data.train_mode=train_validation + +# TRAIN network 2 with voxel size 0.2 and larger backbone +python main_instance_segmentation.py \ +general.experiment_name="benchmark_02" \ +general.project_name="stpls3d" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.2 \ +data.num_workers=10 \ +data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 \ +data.train_mode=train_validation + +# TEST network 1 +python main_instance_segmentation.py \ +general.experiment_name="benchmark_03_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}_size_${CURR_SIZE}_T_${CURR_THRESHOLD}" \ +general.project_name="stpls3d_eval" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.333 \ +data.num_workers=10 \ 
+data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +model.config.backbone._target_=models.Res16UNet18B \ +general.train_mode=false \ +general.checkpoint="checkpoints/stpls3d/stpls3d_benchmark_03.ckpt" \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} \ +data.test_mode=test \ +general.export=true + +# TEST network 2 +python main_instance_segmentation.py \ +general.experiment_name="benchmark_02_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}_size_${CURR_SIZE}_T_${CURR_THRESHOLD}" \ +general.project_name="stpls3d_eval" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.2 \ +data.num_workers=10 \ +data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +general.train_mode=false \ +general.checkpoint="checkpoints/stpls3d/stpls3d_benchmark_02.ckpt" \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} \ +data.test_mode=test \ +general.export=true + +# COMBINE OUTPUTS OF ENSEMBLE +# VOXEL SIZE 0.2 FOR OBJECTS OF SMALL CLASSES; VOXEL SIZE 0.333 FOR OBJECTS OF LARGE CLASS CATEGORIES +# TODO FILL IN PATHS +python merge_exports.py diff --git a/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_val.sh b/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_val.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d5cdce1e34537c2d1d3940edb37f7693d55aba1 --- /dev/null +++ b/models/Mask3D/mask3d/scripts/stpls3d/stpls3d_val.sh @@ -0,0 +1,48 @@ +#!/bin/bash +export OMP_NUM_THREADS=3 + +CURR_DBSCAN=14.0 +CURR_TOPK=750 +CURR_QUERY=160 +CURR_SIZE=54 + +# TRAIN +python main_instance_segmentation.py \ +general.experiment_name="validation" \ +general.project_name="stpls3d" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.333 \ +data.num_workers=10 \ +data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +model.config.backbone._target_=models.Res16UNet18B \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 + +# TEST +python main_instance_segmentation.py \ +general.experiment_name="validation_query_${CURR_QUERY}_topk_${CURR_TOPK}_dbscan_${CURR_DBSCAN}_size_${CURR_SIZE}" \ +general.project_name="stpls3d_eval" \ +data/datasets=stpls3d \ +general.num_targets=15 \ +data.num_labels=15 \ +data.voxel_size=0.333 \ +data.num_workers=10 \ +data.cache_data=true \ +data.cropping_v1=false \ +general.reps_per_epoch=100 \ +model.num_queries=${CURR_QUERY} \ +general.on_crops=true \ +model.config.backbone._target_=models.Res16UNet18B \ +general.train_mode=false \ +general.checkpoint="checkpoints/stpls3d/stpls3d_val.ckpt" \ +data.crop_length=${CURR_SIZE} \ +general.eval_inner_core=50.0 \ +general.topk_per_image=${CURR_TOPK} \ +general.use_dbscan=true \ +general.dbscan_eps=${CURR_DBSCAN} diff --git a/models/Mask3D/mask3d/trainer/__init__.py b/models/Mask3D/mask3d/trainer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/trainer/trainer.py b/models/Mask3D/mask3d/trainer/trainer.py new file mode 100644 index 
0000000000000000000000000000000000000000..b794e38aa5b2cef7eb106f95ced43466768b3dba --- /dev/null +++ b/models/Mask3D/mask3d/trainer/trainer.py @@ -0,0 +1,1302 @@ +import gc +from contextlib import nullcontext +from pathlib import Path +import statistics +import shutil +import os +import math +import pyviz3d.visualizer as vis +from torch_scatter import scatter_mean +import matplotlib +from benchmark.evaluate_semantic_instance import evaluate +from collections import defaultdict +from sklearn.cluster import DBSCAN +from utils.votenet_utils.eval_det import eval_det +from datasets.scannet200.scannet200_splits import ( + HEAD_CATS_SCANNET_200, + TAIL_CATS_SCANNET_200, + COMMON_CATS_SCANNET_200, + VALID_CLASS_IDS_200_VALIDATION, +) + +import hydra +import MinkowskiEngine as ME +import numpy as np +import pytorch_lightning as pl +import torch +from models.metrics import IoU +import random +import colorsys +from typing import List, Tuple +import functools + + +@functools.lru_cache(20) +def get_evenly_distributed_colors( + count: int, +) -> List[Tuple[np.uint8, np.uint8, np.uint8]]: + # lru cache caches color tuples + HSV_tuples = [(x / count, 1.0, 1.0) for x in range(count)] + random.shuffle(HSV_tuples) + return list( + map( + lambda x: (np.array(colorsys.hsv_to_rgb(*x)) * 255).astype( + np.uint8 + ), + HSV_tuples, + ) + ) + + +class RegularCheckpointing(pl.Callback): + def on_train_epoch_end( + self, trainer: "pl.Trainer", pl_module: "pl.LightningModule" + ): + general = pl_module.config.general + trainer.save_checkpoint(f"{general.save_dir}/last-epoch.ckpt") + print("Checkpoint created") + + +class InstanceSegmentation(pl.LightningModule): + def __init__(self, config): + super().__init__() + + self.decoder_id = config.general.decoder_id + + if config.model.train_on_segments: + self.mask_type = "segment_mask" + else: + self.mask_type = "masks" + + self.eval_on_segments = config.general.eval_on_segments + + self.config = config + self.save_hyperparameters() + # model + self.model = hydra.utils.instantiate(config.model) + self.optional_freeze = nullcontext + if config.general.freeze_backbone: + self.optional_freeze = torch.no_grad + # loss + self.ignore_label = config.data.ignore_label + + matcher = hydra.utils.instantiate(config.matcher) + weight_dict = { + "loss_ce": matcher.cost_class, + "loss_mask": matcher.cost_mask, + "loss_dice": matcher.cost_dice, + } + + aux_weight_dict = {} + for i in range(self.model.num_levels * self.model.num_decoders): + if i not in self.config.general.ignore_mask_idx: + aux_weight_dict.update( + {k + f"_{i}": v for k, v in weight_dict.items()} + ) + else: + aux_weight_dict.update( + {k + f"_{i}": 0.0 for k, v in weight_dict.items()} + ) + weight_dict.update(aux_weight_dict) + + self.preds = dict() + self.bbox_preds = dict() + self.bbox_gt = dict() + + self.criterion = hydra.utils.instantiate( + config.loss, matcher=matcher, weight_dict=weight_dict + ) + + # metrics + self.confusion = hydra.utils.instantiate(config.metrics) + self.iou = IoU() + # misc + self.labels_info = dict() + + def forward( + self, x, point2segment=None, raw_coordinates=None, is_eval=False + ): + with self.optional_freeze(): + x = self.model( + x, + point2segment, + raw_coordinates=raw_coordinates, + is_eval=is_eval, + ) + return x + + def training_step(self, batch, batch_idx): + data, target, file_names = batch + + if data.features.shape[0] > self.config.general.max_batch_size: + print("data exceeds threshold") + raise RuntimeError("BATCH TOO BIG") + + if len(target) == 0: + print("no targets") 
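+            # returning None lets PyTorch Lightning skip the optimization step for this batch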
+ return None + + raw_coordinates = None + if self.config.data.add_raw_coordinates: + raw_coordinates = data.features[:, -3:] + data.features = data.features[:, :-3] + + data = ME.SparseTensor( + coordinates=data.coordinates, + features=data.features, + device=self.device, + ) + + try: + output = self.forward( + data, + point2segment=[ + target[i]["point2segment"] for i in range(len(target)) + ], + raw_coordinates=raw_coordinates, + ) + except RuntimeError as run_err: + print(run_err) + if ( + "only a single point gives nans in cross-attention" + == run_err.args[0] + ): + return None + else: + raise run_err + + try: + losses = self.criterion(output, target, mask_type=self.mask_type) + except ValueError as val_err: + print(f"ValueError: {val_err}") + print(f"data shape: {data.shape}") + print(f"data feat shape: {data.features.shape}") + print(f"data feat nans: {data.features.isnan().sum()}") + print(f"output: {output}") + print(f"target: {target}") + print(f"filenames: {file_names}") + raise val_err + + for k in list(losses.keys()): + if k in self.criterion.weight_dict: + losses[k] *= self.criterion.weight_dict[k] + else: + # remove this loss if not specified in `weight_dict` + losses.pop(k) + + logs = { + f"train_{k}": v.detach().cpu().item() for k, v in losses.items() + } + + logs["train_mean_loss_ce"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_ce" in k]] + ) + + logs["train_mean_loss_mask"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_mask" in k]] + ) + + logs["train_mean_loss_dice"] = statistics.mean( + [item for item in [v for k, v in logs.items() if "loss_dice" in k]] + ) + + self.log_dict(logs) + return sum(losses.values()) + + def validation_step(self, batch, batch_idx): + return self.eval_step(batch, batch_idx) + + def export(self, pred_masks, scores, pred_classes, file_names, decoder_id): + root_path = f"eval_output" + base_path = f"{root_path}/instance_evaluation_{self.config.general.experiment_name}_{self.current_epoch}/decoder_{decoder_id}" + pred_mask_path = f"{base_path}/pred_mask" + + Path(pred_mask_path).mkdir(parents=True, exist_ok=True) + + file_name = file_names + with open(f"{base_path}/{file_name}.txt", "w") as fout: + real_id = -1 + for instance_id in range(len(pred_classes)): + real_id += 1 + pred_class = pred_classes[instance_id] + score = scores[instance_id] + mask = pred_masks[:, instance_id].astype("uint8") + + if score > self.config.general.export_threshold: + # reduce the export size a bit. 
I guess no performance difference + np.savetxt( + f"{pred_mask_path}/{file_name}_{real_id}.txt", + mask, + fmt="%d", + ) + fout.write( + f"pred_mask/{file_name}_{real_id}.txt {pred_class} {score}\n" + ) + + def training_epoch_end(self, outputs): + train_loss = sum([out["loss"].cpu().item() for out in outputs]) / len( + outputs + ) + results = {"train_loss_mean": train_loss} + self.log_dict(results) + + def validation_epoch_end(self, outputs): + self.test_epoch_end(outputs) + + def save_visualizations( + self, + target_full, + full_res_coords, + sorted_masks, + sort_classes, + file_name, + original_colors, + original_normals, + sort_scores_values, + point_size=20, + sorted_heatmaps=None, + query_pos=None, + backbone_features=None, + ): + + full_res_coords -= full_res_coords.mean(axis=0) + + gt_pcd_pos = [] + gt_pcd_normals = [] + gt_pcd_color = [] + gt_inst_pcd_color = [] + gt_boxes = [] + + if "labels" in target_full: + instances_colors = torch.from_numpy( + np.vstack( + get_evenly_distributed_colors( + target_full["labels"].shape[0] + ) + ) + ) + for instance_counter, (label, mask) in enumerate( + zip(target_full["labels"], target_full["masks"]) + ): + if label == 255: + continue + + mask_tmp = mask.detach().cpu().numpy() + mask_coords = full_res_coords[mask_tmp.astype(bool), :] + + if len(mask_coords) == 0: + continue + + gt_pcd_pos.append(mask_coords) + mask_coords_min = full_res_coords[ + mask_tmp.astype(bool), : + ].min(axis=0) + mask_coords_max = full_res_coords[ + mask_tmp.astype(bool), : + ].max(axis=0) + size = mask_coords_max - mask_coords_min + mask_coords_middle = mask_coords_min + size / 2 + + gt_boxes.append( + { + "position": mask_coords_middle, + "size": size, + "color": self.validation_dataset.map2color([label])[0], + } + ) + + gt_pcd_color.append( + self.validation_dataset.map2color([label]).repeat( + gt_pcd_pos[-1].shape[0], 1 + ) + ) + gt_inst_pcd_color.append( + instances_colors[instance_counter % len(instances_colors)] + .unsqueeze(0) + .repeat(gt_pcd_pos[-1].shape[0], 1) + ) + + gt_pcd_normals.append( + original_normals[mask_tmp.astype(bool), :] + ) + + gt_pcd_pos = np.concatenate(gt_pcd_pos) + gt_pcd_normals = np.concatenate(gt_pcd_normals) + gt_pcd_color = np.concatenate(gt_pcd_color) + gt_inst_pcd_color = np.concatenate(gt_inst_pcd_color) + + v = vis.Visualizer() + + v.add_points( + "RGB Input", + full_res_coords, + colors=original_colors, + normals=original_normals, + visible=True, + point_size=point_size, + ) + + if backbone_features is not None: + v.add_points( + "PCA", + full_res_coords, + colors=backbone_features, + normals=original_normals, + visible=False, + point_size=point_size, + ) + + if "labels" in target_full: + v.add_points( + "Semantics (GT)", + gt_pcd_pos, + colors=gt_pcd_color, + normals=gt_pcd_normals, + alpha=0.8, + visible=False, + point_size=point_size, + ) + v.add_points( + "Instances (GT)", + gt_pcd_pos, + colors=gt_inst_pcd_color, + normals=gt_pcd_normals, + alpha=0.8, + visible=False, + point_size=point_size, + ) + + pred_coords = [] + pred_normals = [] + pred_sem_color = [] + pred_inst_color = [] + + for did in range(len(sorted_masks)): + instances_colors = torch.from_numpy( + np.vstack( + get_evenly_distributed_colors( + max(1, sorted_masks[did].shape[1]) + ) + ) + ) + + for i in reversed(range(sorted_masks[did].shape[1])): + coords = full_res_coords[ + sorted_masks[did][:, i].astype(bool), : + ] + + mask_coords = full_res_coords[ + sorted_masks[did][:, i].astype(bool), : + ] + mask_normals = original_normals[ + sorted_masks[did][:, 
i].astype(bool), : + ] + + label = sort_classes[did][i] + + if len(mask_coords) == 0: + continue + + pred_coords.append(mask_coords) + pred_normals.append(mask_normals) + + pred_sem_color.append( + self.validation_dataset.map2color([label]).repeat( + mask_coords.shape[0], 1 + ) + ) + + pred_inst_color.append( + instances_colors[i % len(instances_colors)] + .unsqueeze(0) + .repeat(mask_coords.shape[0], 1) + ) + + if len(pred_coords) > 0: + pred_coords = np.concatenate(pred_coords) + pred_normals = np.concatenate(pred_normals) + pred_sem_color = np.concatenate(pred_sem_color) + pred_inst_color = np.concatenate(pred_inst_color) + + v.add_points( + "Semantics (Mask3D)", + pred_coords, + colors=pred_sem_color, + normals=pred_normals, + visible=False, + alpha=0.8, + point_size=point_size, + ) + v.add_points( + "Instances (Mask3D)", + pred_coords, + colors=pred_inst_color, + normals=pred_normals, + visible=False, + alpha=0.8, + point_size=point_size, + ) + + v.save( + f"{self.config['general']['save_dir']}/visualizations/{file_name}" + ) + + def eval_step(self, batch, batch_idx): + data, target, file_names = batch + inverse_maps = data.inverse_maps + target_full = data.target_full + original_colors = data.original_colors + data_idx = data.idx + original_normals = data.original_normals + original_coordinates = data.original_coordinates + + # if len(target) == 0 or len(target_full) == 0: + # print("no targets") + # return None + + if len(data.coordinates) == 0: + return 0.0 + + raw_coordinates = None + if self.config.data.add_raw_coordinates: + raw_coordinates = data.features[:, -3:] + data.features = data.features[:, :-3] + + if raw_coordinates.shape[0] == 0: + return 0.0 + + data = ME.SparseTensor( + coordinates=data.coordinates, + features=data.features, + device=self.device, + ) + + try: + output = self.forward( + data, + point2segment=[ + target[i]["point2segment"] for i in range(len(target)) + ], + raw_coordinates=raw_coordinates, + is_eval=True, + ) + except RuntimeError as run_err: + print(run_err) + if ( + "only a single point gives nans in cross-attention" + == run_err.args[0] + ): + return None + else: + raise run_err + + if self.config.data.test_mode != "test": + if self.config.trainer.deterministic: + torch.use_deterministic_algorithms(False) + + try: + losses = self.criterion( + output, target, mask_type=self.mask_type + ) + except ValueError as val_err: + print(f"ValueError: {val_err}") + print(f"data shape: {data.shape}") + print(f"data feat shape: {data.features.shape}") + print(f"data feat nans: {data.features.isnan().sum()}") + print(f"output: {output}") + print(f"target: {target}") + print(f"filenames: {file_names}") + raise val_err + + for k in list(losses.keys()): + if k in self.criterion.weight_dict: + losses[k] *= self.criterion.weight_dict[k] + else: + # remove this loss if not specified in `weight_dict` + losses.pop(k) + if self.config.trainer.deterministic: + torch.use_deterministic_algorithms(True) + + if self.config.general.save_visualizations: + backbone_features = ( + output["backbone_features"].F.detach().cpu().numpy() + ) + from sklearn import decomposition + + pca = decomposition.PCA(n_components=3) + pca.fit(backbone_features) + pca_features = pca.transform(backbone_features) + rescaled_pca = ( + 255 + * (pca_features - pca_features.min()) + / (pca_features.max() - pca_features.min()) + ) + + self.eval_instance_step( + output, + target, + target_full, + inverse_maps, + file_names, + original_coordinates, + original_colors, + original_normals, + raw_coordinates, + 
data_idx, + backbone_features=rescaled_pca + if self.config.general.save_visualizations + else None, + ) + + if self.config.data.test_mode != "test": + return { + f"val_{k}": v.detach().cpu().item() for k, v in losses.items() + } + else: + return 0.0 + + def test_step(self, batch, batch_idx): + return self.eval_step(batch, batch_idx) + + def get_full_res_mask( + self, mask, inverse_map, point2segment_full, is_heatmap=False + ): + mask = mask.detach().cpu()[inverse_map] # full res + + if self.eval_on_segments and is_heatmap == False: + mask = scatter_mean( + mask, point2segment_full, dim=0 + ) # full res segments + mask = (mask > 0.5).float() + mask = mask.detach().cpu()[ + point2segment_full.cpu() + ] # full res points + + return mask + + def get_mask_and_scores( + self, mask_cls, mask_pred, num_queries=100, num_classes=18, device=None + ): + if device is None: + device = self.device + labels = ( + torch.arange(num_classes, device=device) + .unsqueeze(0) + .repeat(num_queries, 1) + .flatten(0, 1) + ) + + if self.config.general.topk_per_image != -1: + scores_per_query, topk_indices = mask_cls.flatten(0, 1).topk( + self.config.general.topk_per_image, sorted=True + ) + else: + scores_per_query, topk_indices = mask_cls.flatten(0, 1).topk( + num_queries, sorted=True + ) + + labels_per_query = labels[topk_indices] + topk_indices = topk_indices // num_classes + mask_pred = mask_pred[:, topk_indices] + + result_pred_mask = (mask_pred > 0).float() + heatmap = mask_pred.float().sigmoid() + + mask_scores_per_image = (heatmap * result_pred_mask).sum(0) / ( + result_pred_mask.sum(0) + 1e-6 + ) + score = scores_per_query * mask_scores_per_image + classes = labels_per_query + + return score, result_pred_mask, classes, heatmap + + def eval_instance_step( + self, + output, + target_low_res, + target_full_res, + inverse_maps, + file_names, + full_res_coords, + original_colors, + original_normals, + raw_coords, + idx, + first_full_res=False, + backbone_features=None, + ): + label_offset = self.validation_dataset.label_offset + prediction = output["aux_outputs"] + prediction.append( + { + "pred_logits": output["pred_logits"], + "pred_masks": output["pred_masks"], + } + ) + + prediction[self.decoder_id][ + "pred_logits" + ] = torch.functional.F.softmax( + prediction[self.decoder_id]["pred_logits"], dim=-1 + )[ + ..., :-1 + ] + + all_pred_classes = list() + all_pred_masks = list() + all_pred_scores = list() + all_heatmaps = list() + all_query_pos = list() + + offset_coords_idx = 0 + for bid in range(len(prediction[self.decoder_id]["pred_masks"])): + if not first_full_res: + if self.model.train_on_segments: + masks = ( + prediction[self.decoder_id]["pred_masks"][bid] + .detach() + .cpu()[target_low_res[bid]["point2segment"].cpu()] + ) + else: + masks = ( + prediction[self.decoder_id]["pred_masks"][bid] + .detach() + .cpu() + ) + + if self.config.general.use_dbscan: + new_preds = { + "pred_masks": list(), + "pred_logits": list(), + } + + curr_coords_idx = masks.shape[0] + curr_coords = raw_coords[ + offset_coords_idx : curr_coords_idx + offset_coords_idx + ] + offset_coords_idx += curr_coords_idx + + for curr_query in range(masks.shape[1]): + curr_masks = masks[:, curr_query] > 0 + + if curr_coords[curr_masks].shape[0] > 0: + clusters = ( + DBSCAN( + eps=self.config.general.dbscan_eps, + min_samples=self.config.general.dbscan_min_points, + n_jobs=-1, + ) + .fit(curr_coords[curr_masks]) + .labels_ + ) + + new_mask = torch.zeros(curr_masks.shape, dtype=int) + new_mask[curr_masks] = ( + torch.from_numpy(clusters) + 
1 + ) + + for cluster_id in np.unique(clusters): + original_pred_masks = masks[:, curr_query] + if cluster_id != -1: + new_preds["pred_masks"].append( + original_pred_masks + * (new_mask == cluster_id + 1) + ) + new_preds["pred_logits"].append( + prediction[self.decoder_id][ + "pred_logits" + ][bid, curr_query] + ) + + scores, masks, classes, heatmap = self.get_mask_and_scores( + torch.stack(new_preds["pred_logits"]).cpu(), + torch.stack(new_preds["pred_masks"]).T, + len(new_preds["pred_logits"]), + self.model.num_classes - 1, + ) + else: + scores, masks, classes, heatmap = self.get_mask_and_scores( + prediction[self.decoder_id]["pred_logits"][bid] + .detach() + .cpu(), + masks, + prediction[self.decoder_id]["pred_logits"][bid].shape[ + 0 + ], + self.model.num_classes - 1, + ) + + masks = self.get_full_res_mask( + masks, + inverse_maps[bid], + target_full_res[bid]["point2segment"], + ) + + heatmap = self.get_full_res_mask( + heatmap, + inverse_maps[bid], + target_full_res[bid]["point2segment"], + is_heatmap=True, + ) + + if backbone_features is not None: + backbone_features = self.get_full_res_mask( + torch.from_numpy(backbone_features), + inverse_maps[bid], + target_full_res[bid]["point2segment"], + is_heatmap=True, + ) + backbone_features = backbone_features.numpy() + else: + assert False, "not tested" + masks = self.get_full_res_mask( + prediction[self.decoder_id]["pred_masks"][bid].cpu(), + inverse_maps[bid], + target_full_res[bid]["point2segment"], + ) + + scores, masks, classes, heatmap = self.get_mask_and_scores( + prediction[self.decoder_id]["pred_logits"][bid].cpu(), + masks, + prediction[self.decoder_id]["pred_logits"][bid].shape[0], + self.model.num_classes - 1, + device="cpu", + ) + + masks = masks.numpy() + heatmap = heatmap.numpy() + + sort_scores = scores.sort(descending=True) + sort_scores_index = sort_scores.indices.cpu().numpy() + sort_scores_values = sort_scores.values.cpu().numpy() + sort_classes = classes[sort_scores_index] + + sorted_masks = masks[:, sort_scores_index] + sorted_heatmap = heatmap[:, sort_scores_index] + + if self.config.general.filter_out_instances: + keep_instances = set() + pairwise_overlap = sorted_masks.T @ sorted_masks + normalization = pairwise_overlap.max(axis=0) + norm_overlaps = pairwise_overlap / normalization + + for instance_id in range(norm_overlaps.shape[0]): + # filter out unlikely masks and nearly empty masks + # if not(sort_scores_values[instance_id] < 0.3 or sorted_masks[:, instance_id].sum() < 500): + if not ( + sort_scores_values[instance_id] + < self.config.general.scores_threshold + ): + # check if mask != empty + if not sorted_masks[:, instance_id].sum() == 0.0: + overlap_ids = set( + np.nonzero( + norm_overlaps[instance_id, :] + > self.config.general.iou_threshold + )[0] + ) + + if len(overlap_ids) == 0: + keep_instances.add(instance_id) + else: + if instance_id == min(overlap_ids): + keep_instances.add(instance_id) + + keep_instances = sorted(list(keep_instances)) + all_pred_classes.append(sort_classes[keep_instances]) + all_pred_masks.append(sorted_masks[:, keep_instances]) + all_pred_scores.append(sort_scores_values[keep_instances]) + all_heatmaps.append(sorted_heatmap[:, keep_instances]) + else: + all_pred_classes.append(sort_classes) + all_pred_masks.append(sorted_masks) + all_pred_scores.append(sort_scores_values) + all_heatmaps.append(sorted_heatmap) + + if self.validation_dataset.dataset_name == "scannet200": + all_pred_classes[bid][all_pred_classes[bid] == 0] = -1 + if self.config.data.test_mode != "test": + 
target_full_res[bid]["labels"][ + target_full_res[bid]["labels"] == 0 + ] = -1 + + for bid in range(len(prediction[self.decoder_id]["pred_masks"])): + all_pred_classes[ + bid + ] = self.validation_dataset._remap_model_output( + all_pred_classes[bid].cpu() + label_offset + ) + + if ( + self.config.data.test_mode != "test" + and len(target_full_res) != 0 + ): + target_full_res[bid][ + "labels" + ] = self.validation_dataset._remap_model_output( + target_full_res[bid]["labels"].cpu() + label_offset + ) + + # PREDICTION BOX + bbox_data = [] + for query_id in range( + all_pred_masks[bid].shape[1] + ): # self.model.num_queries + obj_coords = full_res_coords[bid][ + all_pred_masks[bid][:, query_id].astype(bool), : + ] + if obj_coords.shape[0] > 0: + obj_center = obj_coords.mean(axis=0) + obj_axis_length = obj_coords.max( + axis=0 + ) - obj_coords.min(axis=0) + + bbox = np.concatenate((obj_center, obj_axis_length)) + + bbox_data.append( + ( + all_pred_classes[bid][query_id].item(), + bbox, + all_pred_scores[bid][query_id], + ) + ) + self.bbox_preds[file_names[bid]] = bbox_data + + # GT BOX + bbox_data = [] + for obj_id in range(target_full_res[bid]["masks"].shape[0]): + if target_full_res[bid]["labels"][obj_id].item() == 255: + continue + + obj_coords = full_res_coords[bid][ + target_full_res[bid]["masks"][obj_id, :] + .cpu() + .detach() + .numpy() + .astype(bool), + :, + ] + if obj_coords.shape[0] > 0: + obj_center = obj_coords.mean(axis=0) + obj_axis_length = obj_coords.max( + axis=0 + ) - obj_coords.min(axis=0) + + bbox = np.concatenate((obj_center, obj_axis_length)) + bbox_data.append( + ( + target_full_res[bid]["labels"][obj_id].item(), + bbox, + ) + ) + + self.bbox_gt[file_names[bid]] = bbox_data + + if self.config.general.eval_inner_core == -1: + self.preds[file_names[bid]] = { + "pred_masks": all_pred_masks[bid], + "pred_scores": all_pred_scores[bid], + "pred_classes": all_pred_classes[bid], + } + else: + # prev val_dataset + self.preds[file_names[bid]] = { + "pred_masks": all_pred_masks[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + "pred_scores": all_pred_scores[bid], + "pred_classes": all_pred_classes[bid], + } + + if self.config.general.save_visualizations: + if "cond_inner" in self.test_dataset.data[idx[bid]]: + target_full_res[bid]["masks"] = target_full_res[bid][ + "masks" + ][:, self.test_dataset.data[idx[bid]]["cond_inner"]] + self.save_visualizations( + target_full_res[bid], + full_res_coords[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + [self.preds[file_names[bid]]["pred_masks"]], + [self.preds[file_names[bid]]["pred_classes"]], + file_names[bid], + original_colors[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + original_normals[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + [self.preds[file_names[bid]]["pred_scores"]], + sorted_heatmaps=[ + all_heatmaps[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ] + ], + query_pos=all_query_pos[bid][ + self.test_dataset.data[idx[bid]]["cond_inner"] + ] + if len(all_query_pos) > 0 + else None, + backbone_features=backbone_features[ + self.test_dataset.data[idx[bid]]["cond_inner"] + ], + point_size=self.config.general.visualization_point_size, + ) + else: + self.save_visualizations( + target_full_res[bid], + full_res_coords[bid], + [self.preds[file_names[bid]]["pred_masks"]], + [self.preds[file_names[bid]]["pred_classes"]], + file_names[bid], + original_colors[bid], + original_normals[bid], + [self.preds[file_names[bid]]["pred_scores"]], + 
sorted_heatmaps=[all_heatmaps[bid]], + query_pos=all_query_pos[bid] + if len(all_query_pos) > 0 + else None, + backbone_features=backbone_features, + point_size=self.config.general.visualization_point_size, + ) + + if self.config.general.export: + if self.validation_dataset.dataset_name == "stpls3d": + scan_id, _, _, crop_id = file_names[bid].split("_") + crop_id = int(crop_id.replace(".txt", "")) + file_name = ( + f"{scan_id}_points_GTv3_0{crop_id}_inst_nostuff" + ) + + self.export( + self.preds[file_names[bid]]["pred_masks"], + self.preds[file_names[bid]]["pred_scores"], + self.preds[file_names[bid]]["pred_classes"], + file_name, + self.decoder_id, + ) + else: + self.export( + self.preds[file_names[bid]]["pred_masks"], + self.preds[file_names[bid]]["pred_scores"], + self.preds[file_names[bid]]["pred_classes"], + file_names[bid], + self.decoder_id, + ) + + def eval_instance_epoch_end(self): + log_prefix = f"val" + ap_results = {} + + head_results, tail_results, common_results = [], [], [] + + box_ap_50 = eval_det( + self.bbox_preds, self.bbox_gt, ovthresh=0.5, use_07_metric=False + ) + box_ap_25 = eval_det( + self.bbox_preds, self.bbox_gt, ovthresh=0.25, use_07_metric=False + ) + mean_box_ap_25 = sum([v for k, v in box_ap_25[-1].items()]) / len( + box_ap_25[-1].keys() + ) + mean_box_ap_50 = sum([v for k, v in box_ap_50[-1].items()]) / len( + box_ap_50[-1].keys() + ) + + ap_results[f"{log_prefix}_mean_box_ap_25"] = mean_box_ap_25 + ap_results[f"{log_prefix}_mean_box_ap_50"] = mean_box_ap_50 + + for class_id in box_ap_50[-1].keys(): + class_name = self.train_dataset.label_info[class_id]["name"] + ap_results[f"{log_prefix}_{class_name}_val_box_ap_50"] = box_ap_50[ + -1 + ][class_id] + + for class_id in box_ap_25[-1].keys(): + class_name = self.train_dataset.label_info[class_id]["name"] + ap_results[f"{log_prefix}_{class_name}_val_box_ap_25"] = box_ap_25[ + -1 + ][class_id] + + root_path = f"eval_output" + base_path = f"{root_path}/instance_evaluation_{self.config.general.experiment_name}_{self.current_epoch}" + + if self.validation_dataset.dataset_name in [ + "scannet", + "stpls3d", + "scannet200", + ]: + gt_data_path = f"{self.validation_dataset.data_dir[0]}/instance_gt/{self.validation_dataset.mode}" + else: + gt_data_path = f"{self.validation_dataset.data_dir[0]}/instance_gt/Area_{self.config.general.area}" + + pred_path = f"{base_path}/tmp_output.txt" + + log_prefix = f"val" + + if not os.path.exists(base_path): + os.makedirs(base_path) + + try: + if self.validation_dataset.dataset_name == "s3dis": + new_preds = {} + for key in self.preds.keys(): + new_preds[ + key.replace(f"Area_{self.config.general.area}_", "") + ] = { + "pred_classes": self.preds[key]["pred_classes"] + 1, + "pred_masks": self.preds[key]["pred_masks"], + "pred_scores": self.preds[key]["pred_scores"], + } + mprec, mrec = evaluate( + new_preds, gt_data_path, pred_path, dataset="s3dis" + ) + ap_results[f"{log_prefix}_mean_precision"] = mprec + ap_results[f"{log_prefix}_mean_recall"] = mrec + elif self.validation_dataset.dataset_name == "stpls3d": + new_preds = {} + for key in self.preds.keys(): + new_preds[key.replace(".txt", "")] = { + "pred_classes": self.preds[key]["pred_classes"], + "pred_masks": self.preds[key]["pred_masks"], + "pred_scores": self.preds[key]["pred_scores"], + } + + evaluate(new_preds, gt_data_path, pred_path, dataset="stpls3d") + else: + evaluate( + self.preds, + gt_data_path, + pred_path, + dataset=self.validation_dataset.dataset_name, + ) + with open(pred_path, "r") as fin: + for line_id, line in 
enumerate(fin): + if line_id == 0: + # ignore header + continue + class_name, _, ap, ap_50, ap_25 = line.strip().split(",") + + if self.validation_dataset.dataset_name == "scannet200": + if class_name in VALID_CLASS_IDS_200_VALIDATION: + ap_results[ + f"{log_prefix}_{class_name}_val_ap" + ] = float(ap) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_50" + ] = float(ap_50) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_25" + ] = float(ap_25) + + if class_name in HEAD_CATS_SCANNET_200: + head_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + elif class_name in COMMON_CATS_SCANNET_200: + common_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + elif class_name in TAIL_CATS_SCANNET_200: + tail_results.append( + np.array( + (float(ap), float(ap_50), float(ap_25)) + ) + ) + else: + assert (False, "class not known!") + else: + ap_results[ + f"{log_prefix}_{class_name}_val_ap" + ] = float(ap) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_50" + ] = float(ap_50) + ap_results[ + f"{log_prefix}_{class_name}_val_ap_25" + ] = float(ap_25) + + if self.validation_dataset.dataset_name == "scannet200": + head_results = np.stack(head_results) + common_results = np.stack(common_results) + tail_results = np.stack(tail_results) + + mean_tail_results = np.nanmean(tail_results, axis=0) + mean_common_results = np.nanmean(common_results, axis=0) + mean_head_results = np.nanmean(head_results, axis=0) + + ap_results[ + f"{log_prefix}_mean_tail_ap_25" + ] = mean_tail_results[0] + ap_results[ + f"{log_prefix}_mean_common_ap_25" + ] = mean_common_results[0] + ap_results[ + f"{log_prefix}_mean_head_ap_25" + ] = mean_head_results[0] + + ap_results[ + f"{log_prefix}_mean_tail_ap_50" + ] = mean_tail_results[1] + ap_results[ + f"{log_prefix}_mean_common_ap_50" + ] = mean_common_results[1] + ap_results[ + f"{log_prefix}_mean_head_ap_50" + ] = mean_head_results[1] + + ap_results[ + f"{log_prefix}_mean_tail_ap_25" + ] = mean_tail_results[2] + ap_results[ + f"{log_prefix}_mean_common_ap_25" + ] = mean_common_results[2] + ap_results[ + f"{log_prefix}_mean_head_ap_25" + ] = mean_head_results[2] + + overall_ap_results = np.nanmean( + np.vstack((head_results, common_results, tail_results)), + axis=0, + ) + + ap_results[f"{log_prefix}_mean_ap"] = overall_ap_results[0] + ap_results[f"{log_prefix}_mean_ap_50"] = overall_ap_results[1] + ap_results[f"{log_prefix}_mean_ap_25"] = overall_ap_results[2] + + ap_results = { + key: 0.0 if math.isnan(score) else score + for key, score in ap_results.items() + } + else: + mean_ap = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap") + ] + ) + mean_ap_50 = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap_50") + ] + ) + mean_ap_25 = statistics.mean( + [ + item + for key, item in ap_results.items() + if key.endswith("val_ap_25") + ] + ) + + ap_results[f"{log_prefix}_mean_ap"] = mean_ap + ap_results[f"{log_prefix}_mean_ap_50"] = mean_ap_50 + ap_results[f"{log_prefix}_mean_ap_25"] = mean_ap_25 + + ap_results = { + key: 0.0 if math.isnan(score) else score + for key, score in ap_results.items() + } + except (IndexError, OSError) as e: + print("NO SCORES!!!") + ap_results[f"{log_prefix}_mean_ap"] = 0.0 + ap_results[f"{log_prefix}_mean_ap_50"] = 0.0 + ap_results[f"{log_prefix}_mean_ap_25"] = 0.0 + + self.log_dict(ap_results) + + if not self.config.general.export: + shutil.rmtree(base_path) + + del self.preds + del self.bbox_preds + del self.bbox_gt + + 
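For reference, the box-AP bookkeeping in `eval_instance_step` above reduces every predicted and ground-truth instance to an axis-aligned box encoded as the point centroid plus the per-axis extent before handing it to `eval_det`. A minimal, self-contained sketch of that encoding (the function name and the random inputs are illustrative, not part of this patch):

```
import numpy as np


def mask_to_axis_aligned_box(coords, mask):
    """Encode one instance as [cx, cy, cz, dx, dy, dz].

    coords: (N, 3) full-resolution point coordinates.
    mask:   (N,) boolean (or 0/1) instance mask over the same points.
    Returns None for an empty mask, mirroring the obj_coords.shape[0] > 0 guard above.
    """
    obj_coords = coords[mask.astype(bool)]
    if obj_coords.shape[0] == 0:
        return None
    center = obj_coords.mean(axis=0)
    extent = obj_coords.max(axis=0) - obj_coords.min(axis=0)
    return np.concatenate((center, extent))


# Illustrative usage with random data.
coords = np.random.rand(1000, 3)
mask = np.random.rand(1000) > 0.5
print(mask_to_axis_aligned_box(coords, mask))
```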
gc.collect() + + self.preds = dict() + self.bbox_preds = dict() + self.bbox_gt = dict() + + def test_epoch_end(self, outputs): + if self.config.general.export: + return + + self.eval_instance_epoch_end() + + dd = defaultdict(list) + for output in outputs: + for key, val in output.items(): # .items() in Python 3. + dd[key].append(val) + + dd = {k: statistics.mean(v) for k, v in dd.items()} + + dd["val_mean_loss_ce"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_ce" in k]] + ) + dd["val_mean_loss_mask"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_mask" in k]] + ) + dd["val_mean_loss_dice"] = statistics.mean( + [item for item in [v for k, v in dd.items() if "loss_dice" in k]] + ) + + self.log_dict(dd) + + def configure_optimizers(self): + optimizer = hydra.utils.instantiate( + self.config.optimizer, params=self.parameters() + ) + if "steps_per_epoch" in self.config.scheduler.scheduler.keys(): + self.config.scheduler.scheduler.steps_per_epoch = len( + self.train_dataloader() + ) + lr_scheduler = hydra.utils.instantiate( + self.config.scheduler.scheduler, optimizer=optimizer + ) + scheduler_config = {"scheduler": lr_scheduler} + scheduler_config.update(self.config.scheduler.pytorch_lightning_params) + return [optimizer], [scheduler_config] + + def prepare_data(self): + self.train_dataset = hydra.utils.instantiate( + self.config.data.train_dataset + ) + self.validation_dataset = hydra.utils.instantiate( + self.config.data.validation_dataset + ) + self.test_dataset = hydra.utils.instantiate( + self.config.data.test_dataset + ) + self.labels_info = self.train_dataset.label_info + + def train_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.train_collation) + return hydra.utils.instantiate( + self.config.data.train_dataloader, + self.train_dataset, + collate_fn=c_fn, + ) + + def val_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.validation_collation) + return hydra.utils.instantiate( + self.config.data.validation_dataloader, + self.validation_dataset, + collate_fn=c_fn, + ) + + def test_dataloader(self): + c_fn = hydra.utils.instantiate(self.config.data.test_collation) + return hydra.utils.instantiate( + self.config.data.test_dataloader, + self.test_dataset, + collate_fn=c_fn, + ) diff --git a/models/Mask3D/mask3d/utils/__init__.py b/models/Mask3D/mask3d/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/mask3d/utils/gradflow_check.py b/models/Mask3D/mask3d/utils/gradflow_check.py new file mode 100644 index 0000000000000000000000000000000000000000..2fedc91592d66d4e5bdef7531daafccc5b5f2e81 --- /dev/null +++ b/models/Mask3D/mask3d/utils/gradflow_check.py @@ -0,0 +1,62 @@ +""" https://github.com/alwynmathew/gradflow-check """ +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.lines import Line2D + + +def plot_grad_flow(named_parameters): + ave_grads = [] + layers = [] + for n, p in named_parameters: + if (p.requires_grad) and ("bias" not in n): + if p.grad: + layers.append(n) + ave_grads.append(p.grad.abs().mean()) + else: + print(f"{n} - doesn't have gradient computed") + + plt.plot(ave_grads, alpha=0.3, color="b") + plt.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k") + plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical") + plt.xlim(xmin=0, xmax=len(ave_grads)) + plt.xlabel("Layers") + plt.ylabel("average gradient") + plt.title("Gradient flow") + 
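Stepping back to the `configure_optimizers` and data hooks above: optimizers, schedulers, datasets, collation functions, and dataloaders are all built through `hydra.utils.instantiate`, so swapping any of them is a config change rather than a code change. A minimal sketch of that pattern, assuming illustrative `_target_` values and hyperparameters rather than the repo's actual config files:

```
import hydra
import torch
from omegaconf import OmegaConf

# Hypothetical optimizer node: the YAML names a _target_ class plus its kwargs.
optimizer_cfg = OmegaConf.create(
    {"_target_": "torch.optim.AdamW", "lr": 1e-4, "weight_decay": 1e-4}
)
model = torch.nn.Linear(8, 2)  # stand-in for the Mask3D model
optimizer = hydra.utils.instantiate(optimizer_cfg, params=model.parameters())

# Hypothetical scheduler node: steps_per_epoch would be filled in at runtime,
# like the steps_per_epoch patching in configure_optimizers above.
scheduler_cfg = OmegaConf.create(
    {
        "_target_": "torch.optim.lr_scheduler.OneCycleLR",
        "max_lr": 1e-4,
        "epochs": 10,
        "steps_per_epoch": 100,
    }
)
scheduler = hydra.utils.instantiate(scheduler_cfg, optimizer=optimizer)
print(type(optimizer).__name__, type(scheduler).__name__)
```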
plt.grid(True) + + +def plot_grad_flow_v2(named_parameters): + """Plots the gradients flowing through different layers in the net during training. + Can be used for checking for possible gradient vanishing / exploding problems. + + Usage: Plug this function in Trainer class after loss.backwards() as + "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow""" + ave_grads = [] + max_grads = [] + layers = [] + for n, p in named_parameters: + if (p.requires_grad) and ("bias" not in n): + layers.append(n) + if p.grad: + ave_grads.append(p.grad.abs().mean()) + max_grads.append(p.grad.abs().max()) + else: + print(f"{n} - doesn't have gradient computed") + plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c") + plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b") + plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k") + plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical") + plt.xlim(left=0, right=len(ave_grads)) + plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions + plt.xlabel("Layers") + plt.ylabel("average gradient") + plt.title("Gradient flow") + plt.grid(True) + plt.legend( + [ + Line2D([0], [0], color="c", lw=4), + Line2D([0], [0], color="b", lw=4), + Line2D([0], [0], color="k", lw=4), + ], + ["max-gradient", "mean-gradient", "zero-gradient"], + ) diff --git a/models/Mask3D/mask3d/utils/kfold.py b/models/Mask3D/mask3d/utils/kfold.py new file mode 100644 index 0000000000000000000000000000000000000000..5bfeba130c890eec35530adeb23f1362041f7cdc --- /dev/null +++ b/models/Mask3D/mask3d/utils/kfold.py @@ -0,0 +1,89 @@ +""" Author: https://github.com/yk-szk/stratified_group_kfold """ +import random +import numpy as np + + +class StratifiedGroupKFold: + """ + Stratified Group K-fold with sklearn.model_selection.KFold compabitility. + + Split dataset into k folds with balanced label distribution (stratified) and non-overlapping group. 
+ + Args: + n_splits (int): # of splits + shuffle (bool): Shuffle + seed (int): Seed value for random number generator + """ + + def __init__(self, n_splits, shuffle=True, random_state=None): + self.n_splits = n_splits + self.shuffle = shuffle + self.seed = random_state + + def split(self, X, labels, groups): + assert len(X) == len(labels) == len(groups), "Invalid input length" + assert ( + len(set(groups)) >= self.n_splits + ), "The number of groups needs to be larger than n_splits" + + def encode(v): + s = set(v) + d = {l: i for i, l in enumerate(s)} + return [d[e] for e in v] + + labels, groups = encode(labels), encode(groups) + num_labels, num_groups = max(labels) + 1, max(groups) + 1 + label_counts_per_group = np.zeros((num_groups, num_labels), dtype=int) + global_label_dist = np.bincount(labels) + for label, g in zip(labels, groups): + label_counts_per_group[g][label] += 1 + + label_counts_per_fold = np.zeros( + (self.n_splits, num_labels), dtype=int + ) + groups_per_fold = [set() for _ in range(self.n_splits)] + + def eval_label_counts_per_fold(y_counts, fold): + fold += y_counts + std_per_label = ( + np.std(label_counts_per_fold, axis=0) / global_label_dist + ) + fold -= y_counts + return np.mean(std_per_label) + + groups_and_label_counts = list(enumerate(label_counts_per_group)) + if self.shuffle: + rng = random.Random(self.seed) + mean_std = np.mean(np.std(label_counts_per_group, axis=1)) + groups_and_label_counts.sort( + key=lambda g_counts: -np.std(g_counts[1]) + + rng.gauss(0, mean_std) + ) # add rng.gauss to increase the randomness + else: + groups_and_label_counts.sort( + key=lambda g_counts: -np.std(g_counts[1]) + ) + + for g, label_counts in groups_and_label_counts: + evals = [ + eval_label_counts_per_fold( + label_counts, label_counts_per_fold[i] + ) + for i in range(self.n_splits) + ] + best_fold = np.argmin(evals) + label_counts_per_fold[best_fold] += label_counts + groups_per_fold[best_fold].add(g) + + all_groups = set(groups) + for test_groups in groups_per_fold: + train_groups = all_groups - test_groups + + train_indices = [ + i for i, g in enumerate(groups) if g in train_groups + ] + test_indices = [ + i for i, g in enumerate(groups) if g in test_groups + ] + + yield train_indices, test_indices diff --git a/models/Mask3D/mask3d/utils/pc_visualizations.py b/models/Mask3D/mask3d/utils/pc_visualizations.py new file mode 100644 index 0000000000000000000000000000000000000000..26937b9f293f9cc2b87cc67d3c8742c80f770d60 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pc_visualizations.py @@ -0,0 +1,202 @@ +from io import BytesIO +from imageio import imread + +import open3d as o3d +from PIL import Image +import numpy as np +import plotly.graph_objects as go +from plotly.subplots import make_subplots +from pandas import DataFrame +import matplotlib +import seaborn as sns +import pyviz3d.visualizer as viz + +matplotlib.use("Agg") +import matplotlib.pyplot as plt + + +def point_cloud_plolty( + coordinates, + label_color, + label_text, + prediction_color, + prediction_text, + normals, +): + def draw_point_cloud(coords, colors=None, label_text=None): + marker = dict(size=1, opacity=0.8) + if colors is not None: + marker.update({"color": colors}) + if (colors is None) and (label_text is not None): + marker.update({"color": label_text}) + fig = go.Scatter3d( + x=coords[:, 0], + y=coords[:, 1], + z=coords[:, 2], + text=label_text, + mode="markers", + marker=marker, + ) + return fig + + fig = make_subplots( + rows=1, + cols=2, + specs=[[{"type": "scatter3d"}, {"type": "scatter3d"}]], + 
) + fig.add_trace( + draw_point_cloud(coordinates, prediction_color, prediction_text), + row=1, + col=1, + ) + # adding image with prediction + fig.add_trace( + draw_point_cloud(coordinates, label_color, label_text), row=1, col=2 + ) + fig.show() + # data = fig.to_image(width=1080, height=720, format="png") + # image = Image.open(BytesIO(data)) + # return image + + +def point_cloud_pyviz3d( + name, + coordinates, + path, + color=None, + normals=None, + label_color=None, + prediction_color=None, + point_size=25, + voxel_size=0.01, +): + + # because of visualization + coordinates = coordinates * voxel_size + # First, we set up a visualizer + visualizer = viz.Visualizer() + if label_color is not None: + visualizer.add_points( + name=f"{name}_label", + positions=coordinates, + colors=label_color, + point_size=point_size, + visible=False, + ) + + if prediction_color is not None: + visualizer.add_points( + name=f"{name}_prediction", + positions=coordinates, + colors=prediction_color, + point_size=point_size, + visible=False, + ) + + visualizer.add_points( + name=name, + positions=coordinates, + colors=color, + normals=normals, + point_size=point_size, + visible=False, + ) + # When we added everything we need to the visualizer, we save it. + visualizer.save(path, verbose=False) + + +def point_cloud_open3d(coordinates): + points = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(coordinates)) + o3d.visualization.draw_geometries([points]) + + +def _remap_model_output(output, labels): + output = np.array(output) + output_remapped = output.copy() + for i, k in enumerate(labels.keys()): + output_remapped[output == i] = k + return output_remapped + + +def save_visualization( + coordinates, + name="none", + color=None, + normals=None, + target=None, + prediction=None, + target_info=None, + path="./saved", + backend="pyviz3d", + voxel_size=0.05, + color_mean=[0.47793125906962, 0.4303257521323044, 0.3749598901421883], + color_std=[0.2834475483823543, 0.27566157565723015, 0.27018971370874995], +): + target = _remap_model_output(target, target_info) + prediction = _remap_model_output(prediction, target_info) + coordinates = coordinates[:, :3] - coordinates[:, :3].mean(axis=0) + coordinates = coordinates * voxel_size + if color is not None: + color = (color * color_std + color_mean) * 255 + + target_color = np.zeros((len(target), 3)) + target_text = np.full((len(target)), "empty") + prediction_color = np.zeros((len(prediction), 3)) + prediction_text = np.full((len(prediction)), "empty") + if target_info is not None: + for k, v in target_info.items(): + target_color[target == k] = v["color"] + target_text[target == k] = v["name"] + prediction_color[prediction == k] = v["color"] + prediction_text[prediction == k] = v["name"] + if backend == "pyviz3d": + point_cloud_pyviz3d( + name=name, + coordinates=coordinates, + path=path, + color=color, + normals=normals, + label_color=target_color, + prediction_color=prediction_color, + voxel_size=1, + ) + elif backend == "plotly": + point_cloud_plolty( + coordinates=coordinates, + normals=normals, + label_color=target_color, + label_text=target_text, + prediction_color=prediction_color, + prediction_text=prediction_text, + ) + elif backend == "open3d": + point_cloud_open3d(coordinates) + else: + print("No such backend") + + +def draw_confsion_matrix(confusion_matrix, label_db): + index = [i for i in range(confusion_matrix.shape[0])] + index = _remap_model_output(index, label_db) + column_names = np.full((len(index)), "empty") + for k, v in label_db.items(): + 
column_names[index == k] = v["name"] + df_cm = DataFrame( + confusion_matrix, index=column_names, columns=column_names + ) + # pretty_plot_confusion_matrix(df_cm, fz=9) + sns.heatmap( + df_cm, + annot=True, + fmt="d", + linewidths=0.25, + annot_kws={"size": 5}, + vmax=10000, + ) + buf = BytesIO() + plt.savefig(buf, format="jpg") + plt.close() + buf.seek(0) + image = imread(buf, format="jpg") + buf.close() + return image diff --git a/models/Mask3D/mask3d/utils/point_cloud_utils.py b/models/Mask3D/mask3d/utils/point_cloud_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d2b5ec875da78d299c23afa70531cb0df04e278 --- /dev/null +++ b/models/Mask3D/mask3d/utils/point_cloud_utils.py @@ -0,0 +1,83 @@ +from pathlib import Path +from typing import List, Optional, Tuple + +import numpy as np +import open3d +from plyfile import PlyData, PlyElement + + +def load_ply(filepath): + with open(filepath, "rb") as f: + plydata = PlyData.read(f) + data = plydata.elements[0].data + coords = np.array([data["x"], data["y"], data["z"]], dtype=np.float32).T + feats = None + labels = None + if ({"red", "green", "blue"} - set(data.dtype.names)) == set(): + feats = np.array( + [data["red"], data["green"], data["blue"]], dtype=np.uint8 + ).T + if "label" in data.dtype.names: + labels = np.array(data["label"], dtype=np.uint32) + return coords, feats, labels + + +def load_ply_with_normals(filepath): + mesh = open3d.io.read_triangle_mesh(str(filepath)) + if not mesh.has_vertex_normals(): + mesh.compute_vertex_normals() + vertices = np.asarray(mesh.vertices) + normals = np.asarray(mesh.vertex_normals) + + coords, feats, labels = load_ply(filepath) + assert np.allclose(coords, vertices), "different coordinates" + feats = np.hstack((feats, normals)) + + return coords, feats, labels + + +def load_obj_with_normals(filepath): + mesh = open3d.io.read_triangle_mesh(str(filepath)) + if not mesh.has_vertex_normals(): + mesh.compute_vertex_normals() + coords = np.asarray(mesh.vertices) + normals = np.asarray(mesh.vertex_normals) + colors = np.asarray(mesh.vertex_colors) + feats = np.hstack((colors, normals)) + + return coords, feats + + +def write_point_cloud_in_ply( + filepath: Path, + coords: np.ndarray, + feats: Optional[np.ndarray] = None, + labels: Optional[np.ndarray] = None, + dtypes: Optional[List[Tuple[str, str]]] = [ + ("x", " +#include +#include +#include +#include "aggregation_cuda_kernel.h" + + +void aggregation_forward_cuda(int n, int nsample, int c, int w_c, at::Tensor input_tensor, at::Tensor position_tensor, at::Tensor weight_tensor, at::Tensor idx_tensor, at::Tensor output_tensor) +{ + const float *input = input_tensor.data_ptr(); + const float *position = position_tensor.data_ptr(); + const float *weight = weight_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + aggregation_forward_cuda_launcher(n, nsample, c, w_c, input, position, weight, idx, output); +} + +void aggregation_backward_cuda(int n, int nsample, int c, int w_c, at::Tensor input_tensor, at::Tensor position_tensor, at::Tensor weight_tensor, at::Tensor idx_tensor, at::Tensor grad_output_tensor, at::Tensor grad_input_tensor, at::Tensor grad_position_tensor, at::Tensor grad_weight_tensor) +{ + const float *input = input_tensor.data_ptr(); + const float *position = position_tensor.data_ptr(); + const float *weight = weight_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + const float *grad_output = grad_output_tensor.data_ptr(); + float *grad_input = 
grad_input_tensor.data_ptr(); + float *grad_position = grad_position_tensor.data_ptr(); + float *grad_weight = grad_weight_tensor.data_ptr(); + aggregation_backward_cuda_launcher(n, nsample, c, w_c, input, position, weight, idx, grad_output, grad_input, grad_position, grad_weight); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..8339bb7e2088abffefba02c26b248edafed6cf47 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.cu @@ -0,0 +1,53 @@ +#include "../cuda_utils.h" +#include "aggregation_cuda_kernel.h" + + +__global__ void aggregation_forward_cuda_kernel(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, float *output) { + // input: input: (n, c), position: (n, nsample, c), weight: (n, nsample, w_c), idx: (n, nsample), output: (n, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * c) return; + const int c_idx = index % c; + const int n_idx = index / c; + const int w_c_idx = c_idx % w_c; + for (int nsample_idx = 0; nsample_idx < nsample; nsample_idx++) + { + int idx_idx = n_idx * nsample + nsample_idx; + int input_idx = idx[idx_idx] * c + c_idx; + int position_idx = n_idx * nsample * c + nsample_idx * c + c_idx; + int weight_idx = n_idx * nsample * w_c + nsample_idx * w_c + w_c_idx; + output[index] += (input[input_idx] + position[position_idx]) * weight[weight_idx]; + } +} + +__global__ void aggregation_backward_cuda_kernel(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, const float *grad_output, float *grad_input, float *grad_position, float *grad_weight) { + // input: grad_output: (n, c), output: grad_input: (n, c), grad_position: (n, nsample, c), grad_weight: (n, nsample, w_c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * c) return; + const int c_idx = index % c; + const int n_idx = index / c; + const int w_c_idx = c_idx % w_c; + for (int nsample_idx = 0; nsample_idx < nsample; nsample_idx++) + { + int idx_idx = n_idx * nsample + nsample_idx; + int input_idx = idx[idx_idx] * c + c_idx; + int position_idx = n_idx * nsample * c + nsample_idx * c + c_idx; + int weight_idx = n_idx * nsample * w_c + nsample_idx * w_c + w_c_idx; + atomicAdd(grad_input + input_idx, grad_output[index] * weight[weight_idx]); + grad_position[position_idx] = grad_output[index] * weight[weight_idx]; + atomicAdd(grad_weight + weight_idx, grad_output[index] * (input[input_idx] + position[position_idx])); + } +} + +void aggregation_forward_cuda_launcher(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, float *output) { + // input: input: (n, c), position: (n, nsample, c), weight: (n, nsample, w_c), idx: (n, nsample), output: (n, c) + dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + aggregation_forward_cuda_kernel<<>>(n, nsample, c, w_c, input, position, weight, idx, output); +} + +void aggregation_backward_cuda_launcher(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, const float *grad_output, float *grad_input, float *grad_position, float *grad_weight) { + // input: grad_output: (n, c), output: grad_input: (n, c), grad_position: (n, 
nsample, c), grad_weight: (n, nsample, w_c) + dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + aggregation_backward_cuda_kernel<<>>(n, nsample, c, w_c, input, position, weight, idx, grad_output, grad_input, grad_position, grad_weight); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..5211a96aa2acbe0d9baf32bddc9ab4be87703072 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/aggregation/aggregation_cuda_kernel.h @@ -0,0 +1,20 @@ +#ifndef _AGGREGATION_CUDA_KERNEL +#define _AGGREGATION_CUDA_KERNEL +#include +#include +#include + +void aggregation_forward_cuda(int n, int nsample, int c, int w_c, at::Tensor input_tensor, at::Tensor position_tensor, at::Tensor weight_tensor, at::Tensor idx_tensor, at::Tensor output_tensor); +void aggregation_backward_cuda(int n, int nsample, int c, int w_c, at::Tensor input_tensor, at::Tensor position_tensor, at::Tensor weight_tensor, at::Tensor idx_tensor, at::Tensor grad_output_tensor, at::Tensor grad_input_tensor, at::Tensor grad_position_tensor, at::Tensor grad_weight_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void aggregation_forward_cuda_launcher(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, float *output); +void aggregation_backward_cuda_launcher(int n, int nsample, int c, int w_c, const float *input, const float *position, const float *weight, const int *idx, const float *grad_output, float *grad_input, float *grad_position, float *grad_weight); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d2c725ae0ed70c884a8643aa74ba0c0f6660d30 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include "attention_cuda_kernel.h" + +void attention_step1_forward_cuda(int N, int M, int h, int C, at::Tensor q_tensor, at::Tensor k_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor) +{ + const float *q = q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + float *attn = attn_tensor.data_ptr(); + attention_step1_forward_cuda_launcher(N, M, h, C, q, k, index0, index1, attn); +} + +void attention_step1_backward_cuda(int N, int M, int h, int C, at::Tensor grad_out_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor q_tensor, at::Tensor k_tensor, + at::Tensor grad_q_tensor, at::Tensor grad_k_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *q = q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + float *grad_q = grad_q_tensor.data_ptr(); + float *grad_k = grad_k_tensor.data_ptr(); + attention_step1_backward_cuda_launcher(N, M, h, C, grad_out, index0, index1, q, k, grad_q, grad_k); +} + +void attention_step2_forward_cuda(int N, int M, int h, int C, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor 
output_tensor) +{ + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + attention_step2_forward_cuda_launcher(N, M, h, C, attn, v, index0, index1, output); +} + + +void attention_step2_backward_cuda(int N, int M, int h, int C, at::Tensor grad_out_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + float *grad_attn = grad_attn_tensor.data_ptr(); + float *grad_v = grad_v_tensor.data_ptr(); + attention_step2_backward_cuda_launcher(N, M, h, C, grad_out, index0, index1, attn, v, grad_attn, grad_v); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..f71ad62987233229fcb547b30cfb7b9191683050 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.cu @@ -0,0 +1,103 @@ +#include "../cuda_utils.h" +#include "attention_cuda_kernel.h" + + +__global__ void attention_step1_forward_cuda_kernel( // M, h, C//h + int N, int M, int h, int C, const float *q, const float *k, + const int *index0, const int *index1, float *attn) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx0 = index0[m_idx]; + int idx1 = index1[m_idx]; + float val = q[idx0*C+h_idx*C/h+c_idx] * k[idx1*C+h_idx*C/h+c_idx]; + atomicAdd(attn+m_idx*h+h_idx, val); +} + +__global__ void attention_step1_backward_cuda_kernel( // M, h, C//h + int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *q, const float *k, + float *grad_q, float *grad_k) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx0 = index0[m_idx]; + int idx1 = index1[m_idx]; + int grad_out_idx = m_idx*h+h_idx; + int q_idx = idx0*C+h_idx*C/h+c_idx; + int k_idx = idx1*C+h_idx*C/h+c_idx; + atomicAdd(grad_q+q_idx, grad_out[grad_out_idx] * k[k_idx]); + atomicAdd(grad_k+k_idx, grad_out[grad_out_idx] * q[q_idx]); +} + +void attention_step1_forward_cuda_launcher(int N, int M, int h, int C, const float *q, const float *k, + const int *index0, const int *index1, float *attn) { + // input: attn: (M, h), v: (N, h, C/h), index0: (M, ), index1: (M, ) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + dim3 threads(THREADS_PER_BLOCK); + attention_step1_forward_cuda_kernel<<>>(N, M, h, C, q, k, index0, index1, attn); +} + +void attention_step1_backward_cuda_launcher(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, + const float *q, const float *k, float *grad_q, float *grad_k) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); 
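For readability, a plain-PyTorch reference of what the two attention steps compute: step 1 gathers a query/key pair for every sparse index pair and reduces it to a per-head dot product, and step 2 scatters attention-weighted values back onto the query points. This is an illustrative re-implementation with assumed toy shapes, not the code the CUDA extension actually runs.

```
import torch


def attention_step1_reference(q, k, index0, index1):
    # attn[m, head] = <q[index0[m], head, :], k[index1[m], head, :]>
    # q, k: (N, h, C//h); index0, index1: (M,) long tensors; returns (M, h).
    return (q[index0] * k[index1]).sum(dim=-1)


def attention_step2_reference(attn, v, index0, index1):
    # output[i, head, :] = sum over pairs m with index0[m] == i of
    #                      attn[m, head] * v[index1[m], head, :]
    out = torch.zeros_like(v)
    out.index_add_(0, index0, attn.unsqueeze(-1) * v[index1])
    return out


# Assumed toy sizes for a quick shape check.
N, M, h, d = 16, 64, 4, 8
q, k, v = (torch.randn(N, h, d) for _ in range(3))
index0 = torch.randint(0, N, (M,))
index1 = torch.randint(0, N, (M,))
attn = attention_step1_reference(q, k, index0, index1)
out = attention_step2_reference(attn, v, index0, index1)
print(attn.shape, out.shape)  # torch.Size([64, 4]) torch.Size([16, 4, 8])
```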
+ dim3 threads(THREADS_PER_BLOCK); + attention_step1_backward_cuda_kernel<<>>(N, M, h, C, grad_out, index0, index1, q, k, grad_q, grad_k); +} + +__global__ void attention_step2_forward_cuda_kernel( // M, h, C//h + int N, int M, int h, int C, const float *attn, const float *v, + const int *index0, const int *index1, float *output) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx1 = index1[m_idx]; + float val = attn[m_idx*h+h_idx] * v[idx1*C+h_idx*C/h+c_idx]; + int idx0 = index0[m_idx]; + atomicAdd(output+idx0*C+h_idx*C/h+c_idx, val); +} + +__global__ void attention_step2_backward_cuda_kernel( // M, h, C//h + int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, + float *grad_attn, float *grad_v) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx0 = index0[m_idx]; + int idx1 = index1[m_idx]; + int grad_out_idx = idx0*C+h_idx*C/h+c_idx; + atomicAdd(grad_attn+m_idx*h+h_idx, grad_out[grad_out_idx] * v[idx1*C+h_idx*C/h+c_idx]); + atomicAdd(grad_v+idx1*C+h_idx*C/h+c_idx, grad_out[grad_out_idx] * attn[m_idx*h+h_idx]); +} + +void attention_step2_forward_cuda_launcher(int N, int M, int h, int C, const float *attn, const float *v, + const int *index0, const int *index1, float *output) { + // input: attn: (M, h), v: (N, h, C/h), index0: (M, ), index1: (M, ) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_forward_cuda_kernel<<>>(N, M, h, C, attn, v, index0, index1, output); +} + +void attention_step2_backward_cuda_launcher(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, + const float *attn, const float *v, float *grad_attn, float *grad_v) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_backward_cuda_kernel<<>>(N, M, h, C, grad_out, index0, index1, attn, v, grad_attn, grad_v); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..cbd99b9b6a9c65af76aa95d00fff6306446114cd --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention/attention_cuda_kernel.h @@ -0,0 +1,26 @@ +#ifndef _ATTENTION_CUDA_KERNEL +#define _ATTENTION_CUDA_KERNEL +#include +#include +#include + +void attention_step1_forward_cuda(int N, int M, int h, int C, at::Tensor q_tensor, at::Tensor k_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor); +void attention_step1_backward_cuda(int N, int M, int h, int C, at::Tensor grad_out_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor q_tensor, at::Tensor k_tensor, at::Tensor grad_q_tensor, at::Tensor grad_k_tensor); + +void attention_step2_forward_cuda(int N, int M, int h, int C, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor output_tensor); +void attention_step2_backward_cuda(int N, int M, int h, int C, at::Tensor 
grad_out_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void attention_step1_forward_cuda_launcher(int N, int M, int h, int C, const float *q, const float *k, const int *index0, const int *index1, float *attn); +void attention_step1_backward_cuda_launcher(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *q, const float *k, float *grad_q, float *grad_k); + +void attention_step2_forward_cuda_launcher(int N, int M, int h, int C, const float *attn, const float *v, const int *index0, const int *index1, float *output); +void attention_step2_backward_cuda_launcher(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, float *grad_attn, float *grad_v); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.cu b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.cu new file mode 100644 index 0000000000000000000000000000000000000000..2e5343f5a3a0ad52aae7d06d22989f04390b68f6 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.cu @@ -0,0 +1,193 @@ +#include "../cuda_utils.h" +#include "attention_cuda_kernel_v2.h" + + +template +__global__ void attention_step1_forward_cuda_kernel_v2( // M, h, C//h + int N, int M, int h, const float *q, const float *k, + const int *index0_offsets, const int *index1, float *attn) { + + int h_idx = blockIdx.y; + int q_idx = blockIdx.x; + int n_idx = threadIdx.x; + int C = h * d; + // if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + __shared__ float query_vec[d]; + __shared__ int start, end; + + // if(n_idx == 0){ + // printf("blockDim.x: %d\n", blockDim.x); + // } + + if (n_idx == 0){ + start = index0_offsets[q_idx]; + end = index0_offsets[q_idx+1]; + // printf("start: %d, end: %d, blockDim.x: %d\n", start, end, blockDim.x); + } + for(int i = n_idx; i < d; i += blockDim.x) + query_vec[i] = q[q_idx*C + h_idx*d + i]; + + __syncthreads(); + + int m_idx = start + n_idx; + if(m_idx >= end) + return; + + float sum = 0; + for(int i = 0; i < d; i++){ + int k_idx = index1[m_idx]; + float key = k[k_idx * C + h_idx * d + i]; + sum += query_vec[i] * key; + } + attn[m_idx*h + h_idx] = sum; + // int idx0 = index0[m_idx]; + // int idx1 = index1[m_idx]; + // float val = q[idx0*C+h_idx*C/h+c_idx] * k[idx1*C+h_idx*C/h+c_idx]; + // atomicAdd(attn+m_idx*h+h_idx, val); +} + +template +__global__ void attention_step1_backward_cuda_kernel_v2( // M, h, C//h + int N, int M, int h, const float *grad_out, const int *index0_offsets, const int *index1, const float *q, const float *k, + float *grad_q, float *grad_k) { + + int h_idx = blockIdx.y; + int q_idx = blockIdx.x; + int n_idx = threadIdx.x; + int C = d * h; + + __shared__ float query_vec[d]; + __shared__ int start, end; + + if (n_idx == 0){ + start = index0_offsets[q_idx]; + end = index0_offsets[q_idx+1]; + } + for(int i = n_idx; i < d; i += blockDim.x) + query_vec[i] = q[q_idx*C + h_idx*d + i]; + + __shared__ float gradient_new[d]; + for(int i = n_idx; i < d; i += blockDim.x) + gradient_new[i] = 0; + + __syncthreads(); + + int m_idx = start + n_idx; + if(m_idx < end){ + float gradient = grad_out[m_idx*h + h_idx]; + for(int i = 0; i < d; i++){ + int k_idx = index1[m_idx]; + atomicAdd(&gradient_new[i], 
gradient * k[k_idx*C + h_idx*d + i]); + atomicAdd(grad_k + k_idx*C + h_idx*d + i, gradient * query_vec[i]); + } + } + __syncthreads(); + + for(int i = n_idx; i < d; i += blockDim.x) + grad_q[q_idx*C + h_idx*d + i] = gradient_new[i]; +} + +void attention_step1_forward_cuda_launcher_v2(int N, int M, int h, int C, const unsigned int n_max, + const float *q, const float *k, const int *index0_offsets, const int *index1, float *attn) { + // input: attn: (M, h), v: (N, h, C/h), index0: (M, ), index1: (M, ) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(N, h); + unsigned int n_threads = opt_n_threads(n_max); + + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + // n_threads = n_threads > 1024 ? 512 : n_threads; + + // printf("n_max: %d, n_threads: %d\n", n_max, n_threads); + + // dim3 threads(THREADS_PER_BLOCK); + // attention_step1_forward_cuda_kernel_v2<<>>(N, M, h, C, q, k, index0, index1, attn); + + switch (C / h) { + case 16: + attention_step1_forward_cuda_kernel_v2<16><<>>(N, M, h, q, k, index0_offsets, index1, attn); + break; + case 32: + attention_step1_forward_cuda_kernel_v2<32><<>>(N, M, h, q, k, index0_offsets, index1, attn); + break; + default: + throw "d != 16 and d != 32"; + } +} + +void attention_step1_backward_cuda_launcher_v2(int N, int M, int h, int C, const unsigned int n_max, + const float *grad_out, const int *index0_offsets, const int *index1, const float *q, const float *k, float *grad_q, float *grad_k) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + // dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + // dim3 threads(THREADS_PER_BLOCK); + dim3 blocks(N, h); + unsigned int n_threads = opt_n_threads(n_max); + // attention_step1_backward_cuda_kernel_v2<<>>(N, M, h, C/h, grad_out, index0_offsets, index1, q, k, grad_q, grad_k); + + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + // n_threads = n_threads > 1024 ? 
512 : n_threads; + + // printf("n_max: %d, n_threads: %d\n", n_max, n_threads); + + switch (C / h) { + case 16: + attention_step1_backward_cuda_kernel_v2<16><<>>(N, M, h, grad_out, index0_offsets, index1, q, k, grad_q, grad_k); + break; + case 32: + attention_step1_backward_cuda_kernel_v2<32><<>>(N, M, h, grad_out, index0_offsets, index1, q, k, grad_q, grad_k); + break; + default: + throw "d != 16 and d != 32"; + } + +} + +__global__ void attention_step2_forward_cuda_kernel_v2( // M, h, C//h + int N, int M, int h, int C, const float *attn, const float *v, + const int *index0, const int *index1, float *output) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx1 = index1[m_idx]; + float val = attn[m_idx*h+h_idx] * v[idx1*C+h_idx*C/h+c_idx]; + int idx0 = index0[m_idx]; + atomicAdd(output+idx0*C+h_idx*C/h+c_idx, val); +} + +__global__ void attention_step2_backward_cuda_kernel_v2( // M, h, C//h + int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, + float *grad_attn, float *grad_v) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int m_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (m_idx >= M || h_idx >= h || c_idx >= C / h) return; + + int idx0 = index0[m_idx]; + int idx1 = index1[m_idx]; + int grad_out_idx = idx0*C+h_idx*C/h+c_idx; + atomicAdd(grad_attn+m_idx*h+h_idx, grad_out[grad_out_idx] * v[idx1*C+h_idx*C/h+c_idx]); + atomicAdd(grad_v+idx1*C+h_idx*C/h+c_idx, grad_out[grad_out_idx] * attn[m_idx*h+h_idx]); +} + +void attention_step2_forward_cuda_launcher_v2(int N, int M, int h, int C, const float *attn, const float *v, + const int *index0, const int *index1, float *output) { + // input: attn: (M, h), v: (N, h, C/h), index0: (M, ), index1: (M, ) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_forward_cuda_kernel_v2<<>>(N, M, h, C, attn, v, index0, index1, output); +} + +void attention_step2_backward_cuda_launcher_v2(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, + const float *attn, const float *v, float *grad_attn, float *grad_v) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(C/h, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), h, C/h); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_backward_cuda_kernel_v2<<>>(N, M, h, C, grad_out, index0, index1, attn, v, grad_attn, grad_v); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.h b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e7f047bc318928ddb9402acbcdf20204596450 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_kernel_v2.h @@ -0,0 +1,26 @@ +#ifndef _ATTENTION_V2_CUDA_KERNEL +#define _ATTENTION_V2_CUDA_KERNEL +#include +#include +#include + +void attention_step1_forward_cuda_v2(int N, int M, int h, int C, const unsigned int n_max, at::Tensor q_tensor, at::Tensor k_tensor, at::Tensor index0_tensor_offsets, at::Tensor index1_tensor, at::Tensor attn_tensor); +void attention_step1_backward_cuda_v2(int N, int M, int h, int C, const unsigned int n_max, at::Tensor grad_out_tensor, at::Tensor 
index0_tensor_offsets, at::Tensor index1_tensor, at::Tensor q_tensor, at::Tensor k_tensor, at::Tensor grad_q_tensor, at::Tensor grad_k_tensor); + +void attention_step2_forward_cuda_v2(int N, int M, int h, int C, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor output_tensor); +void attention_step2_backward_cuda_v2(int N, int M, int h, int C, at::Tensor grad_out_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void attention_step1_forward_cuda_launcher_v2(int N, int M, int h, int C, const unsigned int n_max, const float *q, const float *k, const int *index0_offsets, const int *index1, float *attn); +void attention_step1_backward_cuda_launcher_v2(int N, int M, int h, int C, const unsigned int n_max, const float *grad_out, const int *index0_offsets, const int *index1, const float *q, const float *k, float *grad_q, float *grad_k); + +void attention_step2_forward_cuda_launcher_v2(int N, int M, int h, int C, const float *attn, const float *v, const int *index0, const int *index1, float *output); +void attention_step2_backward_cuda_launcher_v2(int N, int M, int h, int C, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, float *grad_attn, float *grad_v); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_v2.cpp b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_v2.cpp new file mode 100644 index 0000000000000000000000000000000000000000..311adaf223928f83f3f238268fe0f189b5479657 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/attention_v2/attention_cuda_v2.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include "attention_cuda_kernel_v2.h" + +void attention_step1_forward_cuda_v2(int N, int M, int h, int C, const unsigned int n_max, at::Tensor q_tensor, at::Tensor k_tensor, + at::Tensor index0_tensor_offsets, at::Tensor index1_tensor, at::Tensor attn_tensor) +{ + const float *q = q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index0_offsets = index0_tensor_offsets.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + float *attn = attn_tensor.data_ptr(); + attention_step1_forward_cuda_launcher_v2(N, M, h, C, n_max, q, k, index0_offsets, index1, attn); +} + +void attention_step1_backward_cuda_v2(int N, int M, int h, int C, const unsigned int n_max, at::Tensor grad_out_tensor, + at::Tensor index0_tensor_offsets, at::Tensor index1_tensor, at::Tensor q_tensor, at::Tensor k_tensor, + at::Tensor grad_q_tensor, at::Tensor grad_k_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0_offsets = index0_tensor_offsets.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *q = q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + float *grad_q = grad_q_tensor.data_ptr(); + float *grad_k = grad_k_tensor.data_ptr(); + attention_step1_backward_cuda_launcher_v2(N, M, h, C, n_max, grad_out, index0_offsets, index1, q, k, grad_q, grad_k); +} + +void attention_step2_forward_cuda_v2(int N, int M, int h, int C, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor output_tensor) +{ + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const int *index0 = 
index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + attention_step2_forward_cuda_launcher_v2(N, M, h, C, attn, v, index0, index1, output); +} + + +void attention_step2_backward_cuda_v2(int N, int M, int h, int C, at::Tensor grad_out_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + float *grad_attn = grad_attn_tensor.data_ptr(); + float *grad_v = grad_v_tensor.data_ptr(); + attention_step2_backward_cuda_launcher_v2(N, M, h, C, grad_out, index0, index1, attn, v, grad_attn, grad_v); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/cuda_utils.h b/models/Mask3D/mask3d/utils/pointops2/src/cuda_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..e67749c4f5f8964ffb5916c13f5260cf8df45f52 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/cuda_utils.h @@ -0,0 +1,23 @@ +#ifndef _CUDA_UTILS_H +#define _CUDA_UTILS_H + +#include +#include + +#define TOTAL_THREADS 1024 +#define THREADS_PER_BLOCK 256 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + return std::max(std::min(1 << pow_2, TOTAL_THREADS), 1); +} + +inline dim3 opt_block_config(int x, int y) { + const int x_threads = opt_n_threads(x); + const int y_threads = std::max(std::min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); + dim3 block_config(x_threads, y_threads, 1); + return block_config; +} + +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a00d3139db5a3b58261c825c4a9e46e168fea8ce --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda.cpp @@ -0,0 +1,22 @@ +#include +#include +#include +#include +#include "grouping_cuda_kernel.h" + + +void grouping_forward_cuda(int m, int nsample, int c, at::Tensor input_tensor, at::Tensor idx_tensor, at::Tensor output_tensor) +{ + const float *input = input_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + grouping_forward_cuda_launcher(m, nsample, c, input, idx, output); +} + +void grouping_backward_cuda(int m, int nsample, int c, at::Tensor grad_output_tensor, at::Tensor idx_tensor, at::Tensor grad_input_tensor) +{ + const float *grad_output = grad_output_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + float *grad_input = grad_input_tensor.data_ptr(); + grouping_backward_cuda_launcher(m, nsample, c, grad_output, idx, grad_input); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..58ec0a21a2949f9f82504ccd24597c544c50af40 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.cu @@ -0,0 +1,40 @@ +#include "../cuda_utils.h" +#include "grouping_cuda_kernel.h" + + +__global__ void grouping_forward_cuda_kernel(int m, int nsample, int c, const 
float *__restrict__ input, const int *__restrict__ idx, float *__restrict__ output) { + // input: input: (n, c), idx: (m, nsample), output: (m, nsample, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * nsample * c) return; + const int c_idx = index % c; + const int nsample_idx = (index / c) % nsample; + const int m_idx = index / nsample / c; + const int input_idx = idx[m_idx * nsample + nsample_idx] * c + c_idx; + output[index] = input[input_idx]; +} + +__global__ void grouping_backward_cuda_kernel(int m, int nsample, int c, const float *__restrict__ grad_output, const int *__restrict__ idx, float *__restrict__ grad_input) { + // input: grad_output: (m, nsample, c), idx: (m, nsample), output: grad_input: (n, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= m * nsample * c) return; + const int c_idx = index % c; + const int nsample_idx = (index / c) % nsample; + const int m_idx = index / nsample / c; + const int input_idx = idx[m_idx * nsample + nsample_idx] * c + c_idx; + atomicAdd(grad_input + input_idx, grad_output[index]); +} + +void grouping_forward_cuda_launcher(int m, int nsample, int c, const float *input, const int *idx, float *output) { + // input: input: (n, c), idx: (m, nsample), output: (m, nsample, c) + dim3 blocks(DIVUP(m * nsample * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + grouping_forward_cuda_kernel<<>>(m, nsample, c, input, idx, output); +} + +void grouping_backward_cuda_launcher(int m, int nsample, int c, const float *grad_output, const int *idx, float *grad_input) +{ + // input: grad_output: (m, nsample, c), idx: (m, nsample), output: grad_input: (n, c) + dim3 blocks(DIVUP(m * nsample * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + grouping_backward_cuda_kernel<<>>(m, nsample, c, grad_output, idx, grad_input); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..3db4aaa9fad5811d559d47c500e4b00f0165d9b4 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/grouping/grouping_cuda_kernel.h @@ -0,0 +1,20 @@ +#ifndef _GROUPING_CUDA_KERNEL +#define _GROUPING_CUDA_KERNEL +#include +#include +#include + +void grouping_forward_cuda(int m, int nsample, int c, at::Tensor input_tensor, at::Tensor idx_tensor, at::Tensor output_tensor); +void grouping_backward_cuda(int m, int nsample, int c, at::Tensor grad_output_tensor, at::Tensor idx_tensor, at::Tensor grad_input_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void grouping_forward_cuda_launcher(int m, int nsample, int c, const float *input, const int *idx, float *output); +void grouping_backward_cuda_launcher(int m, int nsample, int c, const float *grad_output, const int *idx, float *grad_input); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a73c02b1193330af8e0bc66093749126561700b3 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda.cpp @@ -0,0 +1,24 @@ +#include +#include +#include +#include +#include "interpolation_cuda_kernel.h" + + +void interpolation_forward_cuda(int n, int c, int k, at::Tensor input_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor 
output_tensor) +{ + const float *input = input_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + const float *weight = weight_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + interpolation_forward_cuda_launcher(n, c, k, input, idx, weight, output); +} + +void interpolation_backward_cuda(int n, int c, int k, at::Tensor grad_output_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_input_tensor) +{ + const float *grad_output = grad_output_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + const float *weight = weight_tensor.data_ptr(); + float *grad_input = grad_input_tensor.data_ptr(); + interpolation_backward_cuda_launcher(n, c, k, grad_output, idx, weight, grad_input); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..f560d8c92c6eac865b8c1e1dc27140fe3fcc2250 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.cu @@ -0,0 +1,47 @@ +#include "../cuda_utils.h" +#include "interpolation_cuda_kernel.h" + + +__global__ void interpolation_forward_cuda_kernel(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) +{ + // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * c) return; + int c_idx = index % c; + int n_idx = index / c; + for (int i = 0; i < k; i++) + { + int idx_idx = n_idx * k + i; + int input_idx = idx[idx_idx] * c + c_idx; + output[index] += input[input_idx] * weight[idx_idx]; + } +} + +__global__ void interpolation_backward_cuda_kernel(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) +{ + // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * c) return; + int c_idx = index % c; + int n_idx = index / c; + for (int i = 0; i < k; i++) + { + int idx_idx = n_idx * k + i; + int input_idx = idx[idx_idx] * c + c_idx; + atomicAdd(grad_input + input_idx, grad_output[index] * weight[idx_idx]); + } +} + +void interpolation_forward_cuda_launcher(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output) { + // input: input: (m, c), idx: (n, k), weight: (n, k), output: output (n, c) + dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + interpolation_forward_cuda_kernel<<>>(n, c, k, input, idx, weight, output); +} + +void interpolation_backward_cuda_launcher(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input) { + // input: grad_output: (n, c), idx: (n, k), weight: (n, k), output: grad_input (m, c) + dim3 blocks(DIVUP(n * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + interpolation_backward_cuda_kernel<<>>(n, c, k, grad_output, idx, weight, grad_input); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..309e5dd0a34ccb58807bbf32389ba65e7ee6961b --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/interpolation/interpolation_cuda_kernel.h @@ -0,0 +1,20 @@ +#ifndef 
_INTERPOLATION_CUDA_KERNEL +#define _INTERPOLATION_CUDA_KERNEL +#include +#include +#include + +void interpolation_forward_cuda(int n, int c, int k, at::Tensor input_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor output_tensor); +void interpolation_backward_cuda(int n, int c, int k, at::Tensor grad_output_tensor, at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_input_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void interpolation_forward_cuda_launcher(int n, int c, int k, const float *input, const int *idx, const float *weight, float *output); +void interpolation_backward_cuda_launcher(int n, int c, int k, const float *grad_output, const int *idx, const float *weight, float *grad_input); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..568f1366f65dda9f57f037212a46d2552806e79f --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda.cpp @@ -0,0 +1,17 @@ +#include +#include +#include +#include +#include "knnquery_cuda_kernel.h" + + +void knnquery_cuda(int m, int nsample, at::Tensor xyz_tensor, at::Tensor new_xyz_tensor, at::Tensor offset_tensor, at::Tensor new_offset_tensor, at::Tensor idx_tensor, at::Tensor dist2_tensor) +{ + const float *xyz = xyz_tensor.data_ptr(); + const float *new_xyz = new_xyz_tensor.data_ptr(); + const int *offset = offset_tensor.data_ptr(); + const int *new_offset = new_offset_tensor.data_ptr(); + int *idx = idx_tensor.data_ptr(); + float *dist2 = dist2_tensor.data_ptr(); + knnquery_cuda_launcher(m, nsample, xyz, new_xyz, offset, new_offset, idx, dist2); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..83762bc0110e38c7b5fa8adf0ef4ce255bc9d0b9 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.cu @@ -0,0 +1,116 @@ +#include "../cuda_utils.h" +#include "knnquery_cuda_kernel.h" + + +__device__ void swap_float(float *x, float *y) +{ + float tmp = *x; + *x = *y; + *y = tmp; +} + + +__device__ void swap_int(int *x, int *y) +{ + int tmp = *x; + *x = *y; + *y = tmp; +} + + +__device__ void reheap(float *dist, int *idx, int k) +{ + int root = 0; + int child = root * 2 + 1; + while (child < k) + { + if(child + 1 < k && dist[child+1] > dist[child]) + child++; + if(dist[root] > dist[child]) + return; + swap_float(&dist[root], &dist[child]); + swap_int(&idx[root], &idx[child]); + root = child; + child = root * 2 + 1; + } +} + + +__device__ void heap_sort(float *dist, int *idx, int k) +{ + int i; + for (i = k - 1; i > 0; i--) + { + swap_float(&dist[0], &dist[i]); + swap_int(&idx[0], &idx[i]); + reheap(dist, idx, i); + } +} + + +__device__ int get_bt_idx(int idx, const int *offset) +{ + int i = 0; + while (1) + { + if (idx < offset[i]) + break; + else + i++; + } + return i; +} + + +__global__ void knnquery_cuda_kernel(int m, int nsample, const float *__restrict__ xyz, const float *__restrict__ new_xyz, const int *__restrict__ offset, const int *__restrict__ new_offset, int *__restrict__ idx, float *__restrict__ dist2) { + // input: xyz (n, 3) new_xyz (m, 3) + // output: idx (m, nsample) dist2 (m, nsample) + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= m) 
return; + + new_xyz += pt_idx * 3; + idx += pt_idx * nsample; + dist2 += pt_idx * nsample; + int bt_idx = get_bt_idx(pt_idx, new_offset); + int start; + if (bt_idx == 0) + start = 0; + else + start = offset[bt_idx - 1]; + int end = offset[bt_idx]; + + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + + float best_dist[100]; + int best_idx[100]; + for(int i = 0; i < nsample; i++){ + best_dist[i] = 1e10; + best_idx[i] = start; + } + for(int i = start; i < end; i++){ + float x = xyz[i * 3 + 0]; + float y = xyz[i * 3 + 1]; + float z = xyz[i * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < best_dist[0]){ + best_dist[0] = d2; + best_idx[0] = i; + reheap(best_dist, best_idx, nsample); + } + } + heap_sort(best_dist, best_idx, nsample); + for(int i = 0; i < nsample; i++){ + idx[i] = best_idx[i]; + dist2[i] = best_dist[i]; + } +} + + +void knnquery_cuda_launcher(int m, int nsample, const float *xyz, const float *new_xyz, const int *offset, const int *new_offset, int *idx, float *dist2) { + // input: new_xyz: (m, 3), xyz: (n, 3), idx: (m, nsample) + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + knnquery_cuda_kernel<<>>(m, nsample, xyz, new_xyz, offset, new_offset, idx, dist2); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..3c0aedfe8fbe6c427ee15bb550c2c1829e9f4b97 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/knnquery/knnquery_cuda_kernel.h @@ -0,0 +1,18 @@ +#ifndef _KNNQUERY_CUDA_KERNEL +#define _KNNQUERY_CUDA_KERNEL +#include +#include +#include + +void knnquery_cuda(int m, int nsample, at::Tensor xyz_tensor, at::Tensor new_xyz_tensor, at::Tensor offset_tensor, at::Tensor new_offset_tensor, at::Tensor idx_tensor, at::Tensor dist2_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void knnquery_cuda_launcher(int m, int nsample, const float *xyz, const float *new_xyz, const int *offset, const int *new_offset, int *idx, float *dist2); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/pointops_api.cpp b/models/Mask3D/mask3d/utils/pointops2/src/pointops_api.cpp new file mode 100644 index 0000000000000000000000000000000000000000..812789f7d4fdf961b960641ba6c2fd660c16a654 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/pointops_api.cpp @@ -0,0 +1,45 @@ +#include +#include + +#include "knnquery/knnquery_cuda_kernel.h" +#include "sampling/sampling_cuda_kernel.h" +#include "grouping/grouping_cuda_kernel.h" +#include "interpolation/interpolation_cuda_kernel.h" +#include "aggregation/aggregation_cuda_kernel.h" +#include "subtraction/subtraction_cuda_kernel.h" +#include "attention/attention_cuda_kernel.h" +#include "rpe/relative_pos_encoding_cuda_kernel.h" +#include "attention_v2/attention_cuda_kernel_v2.h" +#include "rpe_v2/relative_pos_encoding_cuda_kernel_v2.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("knnquery_cuda", &knnquery_cuda, "knnquery_cuda"); + m.def("furthestsampling_cuda", &furthestsampling_cuda, "furthestsampling_cuda"); + m.def("grouping_forward_cuda", &grouping_forward_cuda, "grouping_forward_cuda"); + m.def("grouping_backward_cuda", &grouping_backward_cuda, "grouping_backward_cuda"); + m.def("interpolation_forward_cuda", &interpolation_forward_cuda, "interpolation_forward_cuda"); + 
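+ // Editorial sketch (not part of the original patch): each op in this extension follows the
+ // same three-layer pattern -- a CUDA kernel, a *_launcher that chooses the grid/block
+ // configuration, and an at::Tensor wrapper that is registered here via m.def so the compiled
+ // extension can be called from Python. A hypothetical extra op would be exposed as
+ //   m.def("my_op_cuda", &my_op_cuda, "my_op_cuda");   // "my_op_cuda" is an assumed name
+ // and, once built, invoked from Python roughly as
+ //   import pointops2_cuda                              // extension module name is an assumption
+ //   pointops2_cuda.knnquery_cuda(m, nsample, xyz, new_xyz, offset, new_offset, idx, dist2)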
m.def("interpolation_backward_cuda", &interpolation_backward_cuda, "interpolation_backward_cuda"); + m.def("subtraction_forward_cuda", &subtraction_forward_cuda, "subtraction_forward_cuda"); + m.def("subtraction_backward_cuda", &subtraction_backward_cuda, "subtraction_backward_cuda"); + m.def("aggregation_forward_cuda", &aggregation_forward_cuda, "aggregation_forward_cuda"); + m.def("aggregation_backward_cuda", &aggregation_backward_cuda, "aggregation_backward_cuda"); + m.def("attention_step1_forward_cuda", &attention_step1_forward_cuda, "attention_step1_forward_cuda"); + m.def("attention_step1_backward_cuda", &attention_step1_backward_cuda, "attention_step1_backward_cuda"); + m.def("attention_step2_forward_cuda", &attention_step2_forward_cuda, "attention_step2_forward_cuda"); + m.def("attention_step2_backward_cuda", &attention_step2_backward_cuda, "attention_step2_backward_cuda"); + m.def("dot_prod_with_idx_forward_cuda", &dot_prod_with_idx_forward_cuda, "dot_prod_with_idx_forward_cuda"); + m.def("dot_prod_with_idx_backward_cuda", &dot_prod_with_idx_backward_cuda, "dot_prod_with_idx_backward_cuda"); + m.def("attention_step2_with_rel_pos_value_forward_cuda", &attention_step2_with_rel_pos_value_forward_cuda, "attention_step2_with_rel_pos_value_forward_cuda"); + m.def("attention_step2_with_rel_pos_value_backward_cuda", &attention_step2_with_rel_pos_value_backward_cuda, "attention_step2_with_rel_pos_value_backward_cuda"); + m.def("attention_step1_forward_cuda_v2", &attention_step1_forward_cuda_v2, "attention_step1_forward_cuda_v2"); + m.def("attention_step1_backward_cuda_v2", &attention_step1_backward_cuda_v2, "attention_step1_backward_cuda_v2"); + m.def("attention_step2_forward_cuda_v2", &attention_step2_forward_cuda_v2, "attention_step2_forward_cuda_v2"); + m.def("attention_step2_backward_cuda_v2", &attention_step2_backward_cuda_v2, "attention_step2_backward_cuda_v2"); + m.def("dot_prod_with_idx_forward_cuda_v2", &dot_prod_with_idx_forward_cuda_v2, "dot_prod_with_idx_forward_cuda_v2"); + m.def("dot_prod_with_idx_backward_cuda_v2", &dot_prod_with_idx_backward_cuda_v2, "dot_prod_with_idx_backward_cuda_v2"); + m.def("attention_step2_with_rel_pos_value_forward_cuda_v2", &attention_step2_with_rel_pos_value_forward_cuda_v2, "attention_step2_with_rel_pos_value_forward_cuda_v2"); + m.def("attention_step2_with_rel_pos_value_backward_cuda_v2", &attention_step2_with_rel_pos_value_backward_cuda_v2, "attention_step2_with_rel_pos_value_backward_cuda_v2"); + m.def("dot_prod_with_idx_forward_cuda_v3", &dot_prod_with_idx_forward_cuda_v3, "dot_prod_with_idx_forward_cuda_v3"); + m.def("dot_prod_with_idx_backward_cuda_v3", &dot_prod_with_idx_backward_cuda_v3, "dot_prod_with_idx_backward_cuda_v3"); + } diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..634ebb07520a0bd6fbcdf856679cc908eb2bec40 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda.cpp @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include "relative_pos_encoding_cuda_kernel.h" + +void dot_prod_with_idx_forward_cuda(int N, int M, int h, int hdim, at::Tensor q_tensor, at::Tensor index_tensor, + at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor) +{ + const float *q = q_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *index = index_tensor.data_ptr(); + const int 
*rel_idx = rel_idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + dot_prod_with_idx_forward_cuda_launcher(N, M, h, hdim, q, index, table, rel_idx, output); +} + +void dot_prod_with_idx_backward_cuda(int N, int M, int h, int hdim, at::Tensor grad_out_tensor, + at::Tensor q_tensor, at::Tensor index_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, + at::Tensor grad_q_tensor, at::Tensor grad_table_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const float *q = q_tensor.data_ptr(); + const int *index = index_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *grad_q = grad_q_tensor.data_ptr(); + float *grad_table = grad_table_tensor.data_ptr(); + dot_prod_with_idx_backward_cuda_launcher(N, M, h, hdim, grad_out, q, index, table, rel_idx, grad_q, grad_table); +} + +void attention_step2_with_rel_pos_value_forward_cuda(int N, int M, int h, int hdim, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor) +{ + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + attention_step2_with_rel_pos_value_forward_cuda_launcher(N, M, h, hdim, attn, v, index0, index1, table, rel_idx, output); +} + +void attention_step2_with_rel_pos_value_backward_cuda(int N, int M, int h, int hdim, at::Tensor grad_out_tensor, + at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor table_tensor, + at::Tensor rel_idx_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor, at::Tensor grad_table_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0 = index0_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *grad_attn = grad_attn_tensor.data_ptr(); + float *grad_v = grad_v_tensor.data_ptr(); + float *grad_table = grad_table_tensor.data_ptr(); + attention_step2_with_rel_pos_value_backward_cuda_launcher(N, M, h, hdim, grad_out, index0, index1, attn, v, table, rel_idx, grad_attn, grad_v, grad_table); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b8fd8f42116ae0487c741c9b856c10c491f215f9 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.cu @@ -0,0 +1,134 @@ +#include "../cuda_utils.h" +#include "relative_pos_encoding_cuda_kernel.h" + + +__global__ void dot_prod_with_idx_forward_cuda_kernel( // M, h, hdim + int N, int M, int h, int hdim, const float *q, const int *index, + const float *table, const int *rel_idx, float *output) { + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3), output: (M, h) + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (thread_idx >= M*3 || h_idx >= h || c_idx >= hdim) 
return; + + int dim = thread_idx % 3; + int m_idx = thread_idx / 3; + + int q_idx = index[m_idx]; + int rel_idx_dim = rel_idx[thread_idx]; + float rel_table_val = table[rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim]; + float val = q[q_idx*h*hdim+h_idx*hdim+c_idx] * rel_table_val; + atomicAdd(output+m_idx*h+h_idx, val); +} + +__global__ void dot_prod_with_idx_backward_cuda_kernel( // M, h, hdim + int N, int M, int h, int hdim, const float *grad_out, const float *q, const int *index, + const float *table, const int *rel_idx, float *grad_q, float *grad_table) { + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (thread_idx >= M*3 || h_idx >= h || c_idx >= hdim) return; + + int dim = thread_idx % 3; + int m_idx = thread_idx / 3; + + int q_idx = index[m_idx]; + int rel_idx_dim = rel_idx[thread_idx]; + int grad_out_idx = m_idx*h+h_idx; + float grad_out_value = grad_out[grad_out_idx]; + + float rel_table_val = table[rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim]; + atomicAdd(grad_q+q_idx*h*hdim+h_idx*hdim+c_idx, grad_out_value * rel_table_val); + + float q_value = q[q_idx*h*hdim+h_idx*hdim+c_idx]; + atomicAdd(grad_table+rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim, grad_out_value * q_value); +} + +void dot_prod_with_idx_forward_cuda_launcher(int N, int M, int h, int hdim, const float *q, const int *index, + const float *table, const int *rel_idx, float *output) { + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + dim3 threads(THREADS_PER_BLOCK); + dot_prod_with_idx_forward_cuda_kernel<<>>(N, M, h, hdim, q, index, table, rel_idx, output); +} + +void dot_prod_with_idx_backward_cuda_launcher(int N, int M, int h, int hdim, const float *grad_out, + const float *q, const int *index, const float *table, const int *rel_idx, float *grad_q, float *grad_table) { + // input: grad_out: (M, h), output: grad_q: (N, h, hdim), grad_table: (L, h, hdim, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + dim3 threads(THREADS_PER_BLOCK); + dot_prod_with_idx_backward_cuda_kernel<<>>(N, M, h, hdim, grad_out, q, index, table, rel_idx, grad_q, grad_table); +} + +__global__ void attention_step2_with_rel_pos_value_forward_cuda_kernel( // M, h, hdim + int N, int M, int h, int hdim, const float *attn, const float *v, + const int *index0, const int *index1, const float *table, const int *rel_idx, float *output) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (thread_idx >= M*3 || h_idx >= h || c_idx >= hdim) return; + + int dim = thread_idx % 3; + int m_idx = thread_idx / 3; + + int idx1 = index1[m_idx]; + + int rel_idx_dim = rel_idx[thread_idx]; + float table_val = table[rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim]; + + float val = attn[m_idx*h+h_idx] * (v[idx1*h*hdim+h_idx*hdim+c_idx] / 3.0 + table_val); + + int idx0 = index0[m_idx]; + atomicAdd(output+idx0*h*hdim+h_idx*hdim+c_idx, val); +} + + +__global__ void attention_step2_with_rel_pos_value_backward_cuda_kernel( // M, h, hdim + int N, int M, int h, int hdim, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, const float *table, + const int *rel_idx, float *grad_attn, float 
*grad_v, float *grad_table) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + + int c_idx = blockIdx.z; + int h_idx = blockIdx.y; + int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (thread_idx >= M*3 || h_idx >= h || c_idx >= hdim) return; + + int dim = thread_idx % 3; + int m_idx = thread_idx / 3; + + int idx0 = index0[m_idx]; + int idx1 = index1[m_idx]; + int grad_out_idx = idx0*h*hdim+h_idx*hdim+c_idx; + + int rel_idx_dim = rel_idx[thread_idx]; + float table_val = table[rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim]; + float grad_out_value = grad_out[grad_out_idx]; + + atomicAdd(grad_attn+m_idx*h+h_idx, grad_out_value * (v[idx1*h*hdim+h_idx*hdim+c_idx]/3 + table_val)); + atomicAdd(grad_v+idx1*h*hdim+h_idx*hdim+c_idx, grad_out_value * attn[m_idx*h+h_idx]/3); + atomicAdd(grad_table+rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim, grad_out_value * attn[m_idx*h+h_idx]); +} + +void attention_step2_with_rel_pos_value_forward_cuda_launcher(int N, int M, int h, int hdim, const float *attn, const float *v, const int *index0, + const int *index1, const float *table, const int *rel_idx, float *output) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_with_rel_pos_value_forward_cuda_kernel<<>>(N, M, h, hdim, attn, v, index0, index1, table, rel_idx, output); +} + +void attention_step2_with_rel_pos_value_backward_cuda_launcher(int N, int M, int h, int hdim, const float *grad_out, const int *index0, + const int *index1, const float *attn, const float *v, const float *table, const int *rel_idx, float *grad_attn, float *grad_v, float *grad_table) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + dim3 threads(THREADS_PER_BLOCK); + attention_step2_with_rel_pos_value_backward_cuda_kernel<<>>(N, M, h, hdim, grad_out, index0, index1, attn, v, table, rel_idx, grad_attn, grad_v, grad_table); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..cafc7b69152fff9c0c440a093346fb6005923db0 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/rpe/relative_pos_encoding_cuda_kernel.h @@ -0,0 +1,26 @@ +#ifndef _RPE_CUDA_KERNEL +#define _RPE_CUDA_KERNEL +#include +#include +#include + +void dot_prod_with_idx_forward_cuda(int N, int M, int h, int hdim, at::Tensor q_tensor, at::Tensor index_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor); +void dot_prod_with_idx_backward_cuda(int N, int M, int h, int hdim, at::Tensor grad_out_tensor, at::Tensor q_tensor, at::Tensor index_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor grad_q_tensor, at::Tensor grad_table_tensor); + +void attention_step2_with_rel_pos_value_forward_cuda(int N, int M, int h, int hdim, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor); +void attention_step2_with_rel_pos_value_backward_cuda(int N, int M, int h, int hdim, 
at::Tensor grad_out_tensor, at::Tensor index0_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor, at::Tensor grad_table_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void dot_prod_with_idx_forward_cuda_launcher(int N, int M, int h, int hdim, const float *q, const int *index, const float *table, const int *rel_idx, float *output); +void dot_prod_with_idx_backward_cuda_launcher(int N, int M, int h, int hdim, const float *grad_out, const float *q, const int *index, const float *table, const int *rel_idx, float *grad_q, float *grad_table); + +void attention_step2_with_rel_pos_value_forward_cuda_launcher(int N, int M, int h, int hdim, const float *attn, const float *v, const int *index0, const int *index1, const float *table, const int *rel_idx, float *output); +void attention_step2_with_rel_pos_value_backward_cuda_launcher(int N, int M, int h, int hdim, const float *grad_out, const int *index0, const int *index1, const float *attn, const float *v, const float *table, const int *rel_idx, float *grad_attn, float *grad_v, float *grad_table); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.cu b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.cu new file mode 100644 index 0000000000000000000000000000000000000000..628d8e3ab9679ac14fc89872595927c6f997198f --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.cu @@ -0,0 +1,525 @@ +#include "../cuda_utils.h" +#include "relative_pos_encoding_cuda_kernel_v2.h" + + +// N, M, h, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, output + +template +__global__ void dot_prod_with_idx_forward_cuda_kernel_v2( // M, h, hdim + int N, int M, int h, const float *q, const int *index_q, const float *k, const int *index_k, + const float *table_q, const float *table_k, const int *rel_idx, const int *rel_idx_offsets, + const int *sort_indices, float *output) { + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3), output: (M, h) + + int h_idx = blockIdx.y; + int t_idx = blockIdx.x; + int n_idx = threadIdx.x; + int C = h*d; + + __shared__ int start, end; + if(n_idx == 0){ + start = rel_idx_offsets[t_idx]; + end = rel_idx_offsets[t_idx+1]; + // printf("e2: start: %d, end: %d\n", start, end); + } + + __syncthreads(); + + int m_idx_prev = start + n_idx; + // if(m_idx_prev >= end) + // return; + + __shared__ int m_idx; + if(n_idx == 0) + m_idx = sort_indices[m_idx_prev]; + + __syncthreads(); + + __shared__ int rel_idx_vec[3]; + if(n_idx < 3) + rel_idx_vec[n_idx] = rel_idx[m_idx*3 + n_idx]; + + __syncthreads(); + + __shared__ float table_q_vec[d]; + __shared__ float table_k_vec[d]; + + for(int i = n_idx; i < 2*d; i += blockDim.x){ + if (i < d){ + int ind0 = rel_idx_vec[0] * C * 3 + h_idx * d * 3 + i * 3 + 0; + int ind1 = rel_idx_vec[1] * C * 3 + h_idx * d * 3 + i * 3 + 1; + int ind2 = rel_idx_vec[2] * C * 3 + h_idx * d * 3 + i * 3 + 2; + table_q_vec[i] = table_q[ind0] + table_q[ind1] + table_q[ind2]; + } else{ + int ind0 = rel_idx_vec[0] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 0; + int ind1 = rel_idx_vec[1] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 1; + int ind2 = rel_idx_vec[2] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 2; + table_k_vec[i-d] = table_k[ind0] + table_k[ind1] + table_k[ind2]; + } + } + + __syncthreads(); + + 
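+ // Editorial sketch (not part of the original patch): the pairs handled by this block were
+ // pre-sorted so that they all share one relative-position bucket, so the three axis tables
+ // are summed once into shared memory (table_q_vec / table_k_vec) and each thread then only
+ // performs plain dot products for the pairs it is assigned in the loop below. In reference
+ // form, for a pair m and head h_idx:
+ //   out[m][h_idx] = sum_j q[index_q[m]][h_idx][j] * table_q_vec[j]
+ //                 + sum_j k[index_k[m]][h_idx][j] * table_k_vec[j]
+ // with table_*_vec[j] = sum over the 3 axes a of table_*[rel_idx[m][a]][h_idx][j][a].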
for(int i = m_idx_prev; i < end; i += blockDim.x){ + float sum = 0; + int m_idx_i = sort_indices[i]; + int q_idx = index_q[m_idx_i]; + int k_idx = index_k[m_idx_i]; + for(int j = 0; j < d; j++){ + sum += q[q_idx*C + h_idx*d + j] * table_q_vec[j]; + sum += k[k_idx*C + h_idx*d + j] * table_k_vec[j]; + } + output[m_idx_i*h + h_idx] = sum; + } +} + +// N, M, h, hdim, grad_out, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, grad_q, grad_k, grad_table_q, grad_table_k + +template +__global__ void dot_prod_with_idx_backward_cuda_kernel_v2( // M, h, hdim + int N, int M, int h, const float *grad_out, const float *q, const int *index_q, + const float *k, const int *index_k, const float *table_q, const float *table_k, + const int *rel_idx, const int *rel_idx_offsets, const int *sort_indices, float *grad_q, + float *grad_k, float *grad_table_q, float *grad_table_k) { + + int h_idx = blockIdx.y; + int t_idx = blockIdx.x; + int n_idx = threadIdx.x; + int C = h*d; + + __shared__ int start, end; + if(n_idx == 0){ + start = rel_idx_offsets[t_idx]; + end = rel_idx_offsets[t_idx+1]; + } + + __syncthreads(); + + int m_idx_prev = start + n_idx; + // if(m_idx_prev >= end) + // return; + + __shared__ int m_idx; + if(n_idx == 0) + m_idx = sort_indices[m_idx_prev]; + + __syncthreads(); + + __shared__ int rel_idx_vec[3]; + if(n_idx < 3) + rel_idx_vec[n_idx] = rel_idx[m_idx*3 + n_idx]; + + __syncthreads(); + + __shared__ float table_q_vec[d]; + __shared__ float table_k_vec[d]; + + for(int i = n_idx; i < 2*d; i += blockDim.x){ + if (i < d){ + int ind0 = rel_idx_vec[0] * C * 3 + h_idx * d * 3 + i * 3 + 0; + int ind1 = rel_idx_vec[1] * C * 3 + h_idx * d * 3 + i * 3 + 1; + int ind2 = rel_idx_vec[2] * C * 3 + h_idx * d * 3 + i * 3 + 2; + table_q_vec[i] = table_q[ind0] + table_q[ind1] + table_q[ind2]; + } else{ + int ind0 = rel_idx_vec[0] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 0; + int ind1 = rel_idx_vec[1] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 1; + int ind2 = rel_idx_vec[2] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 2; + table_k_vec[i-d] = table_k[ind0] + table_k[ind1] + table_k[ind2]; + } + } + + __shared__ float gradient_q[d]; + __shared__ float gradient_k[d]; + for(int i = n_idx; i < d; i += blockDim.x){ + gradient_q[i] = 0; + gradient_k[i] = 0; + } + + __syncthreads(); + + for(int i = m_idx_prev; i < end; i += blockDim.x){ + int m_idx_i = sort_indices[i]; + int q_idx = index_q[m_idx_i]; + int k_idx = index_k[m_idx_i]; + float grad_out_i = grad_out[m_idx_i*h+h_idx]; + for(int j = 0; j < d; j++){ + atomicAdd(&gradient_q[j], q[q_idx*C + h_idx*d + j] * grad_out_i); + atomicAdd(&gradient_k[j], k[k_idx*C + h_idx*d + j] * grad_out_i); + atomicAdd(grad_q + q_idx*C + h_idx*d + j, table_q_vec[j] * grad_out_i); + atomicAdd(grad_k + k_idx*C + h_idx*d + j, table_k_vec[j] * grad_out_i); + } + } + + __syncthreads(); + + for(int i = n_idx; i < d*2; i += blockDim.x){ + if(i < d){ + atomicAdd(grad_table_q + rel_idx_vec[0] * C * 3 + h_idx * d * 3 + i * 3, gradient_q[i]); + atomicAdd(grad_table_q + rel_idx_vec[1] * C * 3 + h_idx * d * 3 + i * 3 + 1, gradient_q[i]); + atomicAdd(grad_table_q + rel_idx_vec[2] * C * 3 + h_idx * d * 3 + i * 3 + 2, gradient_q[i]); + }else{ + atomicAdd(grad_table_k + rel_idx_vec[0] * C * 3 + h_idx * d * 3 + (i-d) * 3, gradient_k[i-d]); + atomicAdd(grad_table_k + rel_idx_vec[1] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 1, gradient_k[i-d]); + atomicAdd(grad_table_k + rel_idx_vec[2] * C * 3 + h_idx * d * 3 + (i-d) * 3 + 2, gradient_k[i-d]); + } + } + + // int c_idx = blockIdx.z; + // int 
h_idx = blockIdx.y; + // int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; + // if (thread_idx >= M*3 || h_idx >= h || c_idx >= hdim) return; + + // int dim = thread_idx % 3; + // int m_idx = thread_idx / 3; + + // int q_idx = index[m_idx]; + // int rel_idx_dim = rel_idx[thread_idx]; + // int grad_out_idx = m_idx*h+h_idx; + // float grad_out_value = grad_out[grad_out_idx]; + + // float rel_table_val = table[rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim]; + // atomicAdd(grad_q+q_idx*h*hdim+h_idx*hdim+c_idx, grad_out_value * rel_table_val); + + // float q_value = q[q_idx*h*hdim+h_idx*hdim+c_idx]; + // atomicAdd(grad_table+rel_idx_dim*h*hdim*3+h_idx*hdim*3+c_idx*3+dim, grad_out_value * q_value); +} + +void dot_prod_with_idx_forward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, int T, const float *q, + const int *index_q, const float *k, const int *index_k, const float *table_q, const float *table_k, + const int *rel_idx, const int *rel_idx_offsets, const int *sort_indices, float *output) +{ + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(T, h); + // dim3 threads(THREADS_PER_BLOCK); + + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + n_threads = n_threads > 1024 ? 512 : n_threads; + + // printf("e1: T: %d, h: %d, n_threads: %d\n", T, h, n_threads); + + switch (hdim) { + case 16: + dot_prod_with_idx_forward_cuda_kernel_v2<16><<>>(N, M, h, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, output); + break; + case 32: + dot_prod_with_idx_forward_cuda_kernel_v2<32><<>>(N, M, h, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, output); + break; + default: + throw "d != 16 and d != 32"; + } +} + +void dot_prod_with_idx_backward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, int T, + const float *grad_out, const float *q, const int *index_q, const float *k, const int *index_k, + const float *table_q, const float *table_k, const int *rel_idx, const int *rel_idx_offsets, const int *sort_indices, + float *grad_q, float *grad_k, float *grad_table_q, float *grad_table_k) +{ + // input: grad_out: (M, h), output: grad_q: (N, h, hdim), grad_table: (L, h, hdim, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + // dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + // dim3 threads(THREADS_PER_BLOCK); + + dim3 blocks(T, h); + // dim3 threads(THREADS_PER_BLOCK); + + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + n_threads = n_threads > 1024 ? 
512 : n_threads; + + switch (hdim) { + case 16: + dot_prod_with_idx_backward_cuda_kernel_v2<16><<>>(N, M, h, grad_out, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, grad_q, grad_k, grad_table_q, grad_table_k); + break; + case 32: + dot_prod_with_idx_backward_cuda_kernel_v2<32><<>>(N, M, h, grad_out, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, grad_q, grad_k, grad_table_q, grad_table_k); + break; + default: + throw "d != 16 and d != 32"; + } +} + + + +template +__global__ void dot_prod_with_idx_forward_cuda_kernel_v3( // M, h, hdim + int N, int M, int h, const float *q, const int *index_q_offsets, const float *k, const int *index_k, + const float *table_q, const float *table_k, const int *rel_idx, float *output) { + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3), output: (M, h) + int q_idx = blockIdx.x; + int h_idx = blockIdx.y; + int n_idx = threadIdx.x; + int C = h*d; + + __shared__ float query_vec[d]; + __shared__ int start, end; + if (n_idx == 0){ + start = index_q_offsets[q_idx]; + end = index_q_offsets[q_idx+1]; + } + for(int i = n_idx; i < d; i += blockDim.x) + query_vec[i] = q[q_idx*C + h_idx*d + i]; + + __syncthreads(); + + int m_idx = start + n_idx; + if(m_idx >= end) + return; + + int k_idx = index_k[m_idx]; + int r_idx1 = rel_idx[m_idx*3], r_idx2 = rel_idx[m_idx*3+1], r_idx3 = rel_idx[m_idx*3+2]; + float sum = 0; + for(int i = 0; i < d; i++){ + float table_q_scalar_i = table_q[r_idx1*C*3+h_idx*d*3+i*3] + table_q[r_idx2*C*3+h_idx*d*3+i*3+1] + table_q[r_idx3*C*3+h_idx*d*3+i*3+2]; + sum += query_vec[i] * table_q_scalar_i; + float table_k_scalar_i = table_k[r_idx1*C*3+h_idx*d*3+i*3] + table_k[r_idx2*C*3+h_idx*d*3+i*3+1] + table_k[r_idx3*C*3+h_idx*d*3+i*3+2]; + sum += k[k_idx*C+h_idx*d+i] * table_k_scalar_i; + } + output[m_idx*h + h_idx] = sum; + +} + +// N, M, h, hdim, grad_out, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, grad_q, grad_k, grad_table_q, grad_table_k + +template +__global__ void dot_prod_with_idx_backward_cuda_kernel_v3( // M, h, hdim + int N, int M, int h, const float *grad_out, const float *q, const int *index_q_offsets, + const float *k, const int *index_k, const float *table_q, const float *table_k, + const int *rel_idx, float *grad_q, float *grad_k, float *grad_table_q, float *grad_table_k) { + + int q_idx = blockIdx.x; + int h_idx = blockIdx.y; + int n_idx = threadIdx.x; + int C = h*d; + + __shared__ float query_vec[d]; + __shared__ int start, end; + if (n_idx == 0){ + start = index_q_offsets[q_idx]; + end = index_q_offsets[q_idx+1]; + } + for(int i = n_idx; i < d; i += blockDim.x) + query_vec[i] = q[q_idx*C + h_idx*d + i]; + + __shared__ float gradients_q[d]; + for(int i = n_idx; i < d; i += blockDim.x){ + gradients_q[i] = 0; + } + + __syncthreads(); + + int m_idx = start + n_idx; + + if(m_idx < end){ + int k_idx = index_k[m_idx]; + int r_idx1 = rel_idx[m_idx*3], r_idx2 = rel_idx[m_idx*3+1], r_idx3 = rel_idx[m_idx*3+2]; + float gradient = grad_out[m_idx*h + h_idx]; + for(int i = 0; i < d; i++){ + float table_q_scalar_i = table_q[r_idx1*C*3+h_idx*d*3+i*3] + table_q[r_idx2*C*3+h_idx*d*3+i*3+1] + table_q[r_idx3*C*3+h_idx*d*3+i*3+2]; + float table_k_scalar_i = table_k[r_idx1*C*3+h_idx*d*3+i*3] + table_k[r_idx2*C*3+h_idx*d*3+i*3+1] + table_k[r_idx3*C*3+h_idx*d*3+i*3+2]; + float q_scalar_i = query_vec[i]; + float k_scalar_i = k[k_idx*C+h_idx*d+i]; + atomicAdd(&gradients_q[i], table_q_scalar_i * gradient); + 
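+ // Editorial sketch (not part of the original patch): every thread in this block processes
+ // neighbours of the same query q_idx, so the query gradient is accumulated in the shared
+ // buffer gradients_q (shared-memory atomics) and written to grad_q once after the loop,
+ // whereas grad_k, grad_table_q and grad_table_k are scattered over different k_idx / rel_idx
+ // entries and therefore use global atomicAdd directly, as in the lines that follow.
+ // Per output element out[m][h_idx], the chain rule applied here is:
+ //   d out / d q[q_idx][h_idx][i]        = table_q_scalar_i
+ //   d out / d k[k_idx][h_idx][i]        = table_k_scalar_i
+ //   d out / d table_q[r_a][h_idx][i][a] = q[q_idx][h_idx][i]   (axis a picked by rel_idx)
+ //   d out / d table_k[r_a][h_idx][i][a] = k[k_idx][h_idx][i]
+ // each multiplied by the incoming gradient grad_out[m][h_idx].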
atomicAdd(grad_k+k_idx*C+h_idx*d+i, table_k_scalar_i * gradient); + atomicAdd(grad_table_q+r_idx1*C*3+h_idx*d*3+i*3, q_scalar_i * gradient); + atomicAdd(grad_table_q+r_idx2*C*3+h_idx*d*3+i*3+1, q_scalar_i * gradient); + atomicAdd(grad_table_q+r_idx3*C*3+h_idx*d*3+i*3+2, q_scalar_i * gradient); + atomicAdd(grad_table_k+r_idx1*C*3+h_idx*d*3+i*3, k_scalar_i * gradient); + atomicAdd(grad_table_k+r_idx2*C*3+h_idx*d*3+i*3+1, k_scalar_i * gradient); + atomicAdd(grad_table_k+r_idx3*C*3+h_idx*d*3+i*3+2, k_scalar_i * gradient); + } + } + __syncthreads(); + + for(int i = n_idx; i < d; i += blockDim.x){ + grad_q[q_idx*C+h_idx*d+i] = gradients_q[i]; + } +} + +void dot_prod_with_idx_forward_cuda_launcher_v3(int N, int M, int h, int hdim, int n_max, const float *q, + const int *index_q_offsets, const float *k, const int *index_k, const float *table_q, const float *table_k, + const int *rel_idx, float *output) +{ + // input: q: (N, h, hdim), index: (M), table: (L, h, hdim, 3), rel_idx: (M, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + dim3 blocks(N, h); + // dim3 threads(THREADS_PER_BLOCK); + + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + + // printf("e1: h: %d, n_max: %d, n_threads: %d\n", h, n_max, n_threads); + + switch (hdim) { + case 16: + dot_prod_with_idx_forward_cuda_kernel_v3<16><<>>(N, M, h, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, output); + break; + case 32: + dot_prod_with_idx_forward_cuda_kernel_v3<32><<>>(N, M, h, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, output); + break; + default: + throw "d != 16 and d != 32"; + } +} + +void dot_prod_with_idx_backward_cuda_launcher_v3(int N, int M, int h, int hdim, int n_max, + const float *grad_out, const float *q, const int *index_q_offsets, const float *k, const int *index_k, + const float *table_q, const float *table_k, const int *rel_idx, + float *grad_q, float *grad_k, float *grad_table_q, float *grad_table_k) +{ + // input: grad_out: (M, h), output: grad_q: (N, h, hdim), grad_table: (L, h, hdim, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + // dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + // dim3 threads(THREADS_PER_BLOCK); + + dim3 blocks(N, h); + // dim3 threads(THREADS_PER_BLOCK); + + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? 
n_threads : n_threads * 2; + + switch (hdim) { + case 16: + dot_prod_with_idx_backward_cuda_kernel_v3<16><<>>(N, M, h, grad_out, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, grad_q, grad_k, grad_table_q, grad_table_k); + break; + case 32: + dot_prod_with_idx_backward_cuda_kernel_v3<32><<>>(N, M, h, grad_out, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, grad_q, grad_k, grad_table_q, grad_table_k); + break; + default: + throw "d != 16 and d != 32"; + } +} + + +template +__global__ void attention_step2_with_rel_pos_value_forward_cuda_kernel_v2( // M, h, hdim + int N, int M, int h, const float *attn, const float *v, + const int *index0_offsets, const int *index1, const float *table, const int *rel_idx, float *output) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + + int q_idx = blockIdx.x; + int h_idx = blockIdx.y; + int n_idx = threadIdx.x; + + int C = h*d; + + __shared__ int start, end; + __shared__ float result[d]; + + if (n_idx == 0){ + start = index0_offsets[q_idx]; + end = index0_offsets[q_idx+1]; + } + for (int i = n_idx; i < d; i += blockDim.x){ + result[i] = 0; + } + + __syncthreads(); + + int m_idx = start + n_idx; + if (m_idx < end){ + float attn_scalar = attn[m_idx*h + h_idx]; + int r_idx1 = rel_idx[m_idx*3], r_idx2 = rel_idx[m_idx*3+1], r_idx3 = rel_idx[m_idx*3+2]; + for(int i = 0; i < d; i ++){ + int v_idx = index1[m_idx]; + float table_scaler_i = table[r_idx1*C*3+h_idx*d*3+i*3] + table[r_idx2*C*3+h_idx*d*3+i*3+1] + table[r_idx3*C*3+h_idx*d*3+i*3+2]; + float value_scaler_i = v[v_idx*C + h_idx*d + i]; + atomicAdd(&result[i], (table_scaler_i + value_scaler_i) * attn_scalar); + } + } + + __syncthreads(); + + for (int i = n_idx; i < d; i += blockDim.x) + output[q_idx*C + h_idx*d + i] = result[i]; +} + + +template +__global__ void attention_step2_with_rel_pos_value_backward_cuda_kernel_v2( // M, h, hdim + int N, int M, int h, const float *grad_out, const int *index0_offsets, const int *index1, const float *attn, const float *v, const float *table, + const int *rel_idx, float *grad_attn, float *grad_v, float *grad_table) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + + int q_idx = blockIdx.x; + int h_idx = blockIdx.y; + int n_idx = threadIdx.x; + + int C = h*d; + + __shared__ int start, end; + __shared__ float gradients[d]; + + if (n_idx == 0){ + start = index0_offsets[q_idx]; + end = index0_offsets[q_idx+1]; + } + for (int i = n_idx; i < d; i += blockDim.x){ + gradients[i] = grad_out[q_idx*C + h_idx*d + i]; + } + + __syncthreads(); + + int m_idx = start + n_idx; + if (m_idx < end){ + int v_idx = index1[m_idx]; + int r_idx1 = rel_idx[m_idx*3], r_idx2 = rel_idx[m_idx*3+1], r_idx3 = rel_idx[m_idx*3+2]; + float attn_scalar = attn[m_idx*h + h_idx]; + float grad_attn_sum = 0; + for (int i = 0; i < d; i++){ + float grad_out_scaler_i = gradients[i]; + float table_scaler_i = table[r_idx1*C*3+h_idx*d*3+i*3] + table[r_idx2*C*3+h_idx*d*3+i*3+1] + table[r_idx3*C*3+h_idx*d*3+i*3+2]; + float value_scaler_i = v[v_idx*C + h_idx*d + i]; + grad_attn_sum += (table_scaler_i + value_scaler_i) * grad_out_scaler_i; + atomicAdd(grad_v + v_idx*C + h_idx*d + i, attn_scalar * grad_out_scaler_i); + atomicAdd(grad_table + r_idx1*C*3 + h_idx*d*3 + i*3, attn_scalar * grad_out_scaler_i); + atomicAdd(grad_table + r_idx2*C*3 + h_idx*d*3 + i*3 + 1, attn_scalar * grad_out_scaler_i); + atomicAdd(grad_table + r_idx3*C*3 + h_idx*d*3 + i*3 + 2, attn_scalar * 
grad_out_scaler_i); + } + grad_attn[m_idx*h + h_idx] = grad_attn_sum; + } +} + +void attention_step2_with_rel_pos_value_forward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, const float *attn, const float *v, const int *index0_offsets, + const int *index1, const float *table, const int *rel_idx, float *output) { + // input: attn: (M, h), v: (N, h, hdim), index0: (M, ), index1: (M, ), table: (L, h, hdim, 3), rel_idx: (M, 3) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + // dim3 blocks(DIVUP(M*3, THREADS_PER_BLOCK), h, hdim); + // dim3 threads(THREADS_PER_BLOCK); + dim3 blocks(N, h); + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + + switch (hdim) { + case 16: + attention_step2_with_rel_pos_value_forward_cuda_kernel_v2<16><<>>(N, M, h, attn, v, index0_offsets, index1, table, rel_idx, output); + break; + case 32: + attention_step2_with_rel_pos_value_forward_cuda_kernel_v2<32><<>>(N, M, h, attn, v, index0_offsets, index1, table, rel_idx, output); + break; + default: + throw "d != 16 and d != 32"; + } +} + +void attention_step2_with_rel_pos_value_backward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, const float *grad_out, const int *index0_offsets, + const int *index1, const float *attn, const float *v, const float *table, const int *rel_idx, float *grad_attn, float *grad_v, float *grad_table) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + //dim3 blocks(DIVUP(hdim, THREADS_PER_BLOCK), h, M); + + dim3 blocks(N, h); + unsigned int n_threads = opt_n_threads(n_max); + n_threads = n_threads == n_max ? n_threads : n_threads * 2; + + switch (hdim) { + case 16: + attention_step2_with_rel_pos_value_backward_cuda_kernel_v2<16><<>>(N, M, h, grad_out, index0_offsets, index1, attn, v, table, rel_idx, grad_attn, grad_v, grad_table); + break; + case 32: + attention_step2_with_rel_pos_value_backward_cuda_kernel_v2<32><<>>(N, M, h, grad_out, index0_offsets, index1, attn, v, table, rel_idx, grad_attn, grad_v, grad_table); + break; + default: + throw "d != 16 and d != 32"; + } +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.h b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..648b152afe16d3011b62ff141a4e20b2a83579b4 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_kernel_v2.h @@ -0,0 +1,32 @@ +#ifndef _RPE_V2_CUDA_KERNEL +#define _RPE_V2_CUDA_KERNEL +#include +#include +#include + +void dot_prod_with_idx_forward_cuda_v2(int N, int M, int h, int hdim, int n_max, int T, at::Tensor q_tensor, at::Tensor index_q_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor rel_idx_offsets_tensor, at::Tensor sort_indices_tensor, at::Tensor output_tensor); +void dot_prod_with_idx_backward_cuda_v2(int N, int M, int h, int hdim, int n_max, int T, at::Tensor grad_out_tensor, at::Tensor q_tensor, at::Tensor index_q_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor rel_idx_offsets_tensor, at::Tensor sort_indices_tensor, at::Tensor grad_q_tensor, at::Tensor grad_k_tensor, at::Tensor grad_table_q_tensor, at::Tensor grad_table_k_tensor); + +void dot_prod_with_idx_forward_cuda_v3(int N, int M, int h, 
int hdim, int n_max, at::Tensor q_tensor, at::Tensor index_q_offsets_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor); +void dot_prod_with_idx_backward_cuda_v3(int N, int M, int h, int hdim, int n_max, at::Tensor grad_out_tensor, at::Tensor q_tensor, at::Tensor index_q_offsets_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor grad_q_tensor, at::Tensor grad_k_tensor, at::Tensor grad_table_q_tensor, at::Tensor grad_table_k_tensor); + +void attention_step2_with_rel_pos_value_forward_cuda_v2(int N, int M, int h, int hdim, int n_max, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor index0_offsets_tensor, at::Tensor index1_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor); +void attention_step2_with_rel_pos_value_backward_cuda_v2(int N, int M, int h, int hdim, int n_max, at::Tensor grad_out_tensor, at::Tensor index0_offsets_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor, at::Tensor grad_table_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void dot_prod_with_idx_forward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, int T, const float *q, const int *index_q, const float *k, const int *index_k, const float *table_q, const float *table_k, const int *rel_idx, const int *rel_idx_offsets, const int *sort_indices, float *output); +void dot_prod_with_idx_backward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, int T, const float *grad_out, const float *q, const int *index_q, const float *k, const int *index_k, const float *table_q, const float *table_k, const int *rel_idx, const int *rel_idx_offsets, const int *sort_indices, float *grad_q, float *grad_k, float *grad_table_q, float *grad_table_k); + +void dot_prod_with_idx_forward_cuda_launcher_v3(int N, int M, int h, int hdim, int n_max, const float *q, const int *index_q_offsets, const float *k, const int *index_k, const float *table_q, const float *table_k, const int *rel_idx, float *output); +void dot_prod_with_idx_backward_cuda_launcher_v3(int N, int M, int h, int hdim, int n_max, const float *grad_out, const float *q, const int *index_q_offsets, const float *k, const int *index_k, const float *table_q, const float *table_k, const int *rel_idx, float *grad_q, float *grad_k, float *grad_table_q, float *grad_table_k); + +void attention_step2_with_rel_pos_value_forward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, const float *attn, const float *v, const int *index0_offsets, const int *index1, const float *table, const int *rel_idx, float *output); +void attention_step2_with_rel_pos_value_backward_cuda_launcher_v2(int N, int M, int h, int hdim, int n_max, const float *grad_out, const int *index0_offsets, const int *index1, const float *attn, const float *v, const float *table, const int *rel_idx, float *grad_attn, float *grad_v, float *grad_table); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_v2.cpp b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_v2.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0a4c96a8688536d19611a57a2017ae1ba44f12bf --- /dev/null +++ 
b/models/Mask3D/mask3d/utils/pointops2/src/rpe_v2/relative_pos_encoding_cuda_v2.cpp @@ -0,0 +1,111 @@ +#include +#include +#include +#include +#include "relative_pos_encoding_cuda_kernel_v2.h" + +void dot_prod_with_idx_forward_cuda_v2(int N, int M, int h, int hdim, int n_max, int T, at::Tensor q_tensor, + at::Tensor index_q_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, + at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor rel_idx_offsets_tensor, at::Tensor sort_indices_tensor, at::Tensor output_tensor) +{ + const float *q = q_tensor.data_ptr(); + const int *index_q = index_q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index_k = index_k_tensor.data_ptr(); + const float *table_q = table_q_tensor.data_ptr(); + const float *table_k = table_k_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + const int *rel_idx_offsets = rel_idx_offsets_tensor.data_ptr(); + const int *sort_indices = sort_indices_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + dot_prod_with_idx_forward_cuda_launcher_v2(N, M, h, hdim, n_max, T, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, output); +} + +void dot_prod_with_idx_backward_cuda_v2(int N, int M, int h, int hdim, int n_max, int T, at::Tensor grad_out_tensor, + at::Tensor q_tensor, at::Tensor index_q_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, + at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor rel_idx_offsets_tensor, + at::Tensor sort_indices_tensor, at::Tensor grad_q_tensor, at::Tensor grad_k_tensor, at::Tensor grad_table_q_tensor, at::Tensor grad_table_k_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const float *q = q_tensor.data_ptr(); + const int *index_q = index_q_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index_k = index_k_tensor.data_ptr(); + const float *table_q = table_q_tensor.data_ptr(); + const float *table_k = table_k_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + const int *rel_idx_offsets = rel_idx_offsets_tensor.data_ptr(); + const int *sort_indices = sort_indices_tensor.data_ptr(); + float *grad_q = grad_q_tensor.data_ptr(); + float *grad_k = grad_k_tensor.data_ptr(); + float *grad_table_q = grad_table_q_tensor.data_ptr(); + float *grad_table_k = grad_table_k_tensor.data_ptr(); + dot_prod_with_idx_backward_cuda_launcher_v2(N, M, h, hdim, n_max, T, grad_out, q, index_q, k, index_k, table_q, table_k, rel_idx, rel_idx_offsets, sort_indices, grad_q, grad_k, grad_table_q, grad_table_k); +} + + +void dot_prod_with_idx_forward_cuda_v3(int N, int M, int h, int hdim, int n_max, at::Tensor q_tensor, + at::Tensor index_q_offsets_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, at::Tensor table_q_tensor, + at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor) +{ + const float *q = q_tensor.data_ptr(); + const int *index_q_offsets = index_q_offsets_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index_k = index_k_tensor.data_ptr(); + const float *table_q = table_q_tensor.data_ptr(); + const float *table_k = table_k_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + dot_prod_with_idx_forward_cuda_launcher_v3(N, M, h, hdim, n_max, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, output); +} + +void dot_prod_with_idx_backward_cuda_v3(int N, int M, int h, int hdim, 
int n_max, at::Tensor grad_out_tensor, + at::Tensor q_tensor, at::Tensor index_q_offsets_tensor, at::Tensor k_tensor, at::Tensor index_k_tensor, + at::Tensor table_q_tensor, at::Tensor table_k_tensor, at::Tensor rel_idx_tensor, at::Tensor grad_q_tensor, + at::Tensor grad_k_tensor, at::Tensor grad_table_q_tensor, at::Tensor grad_table_k_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const float *q = q_tensor.data_ptr(); + const int *index_q_offsets = index_q_offsets_tensor.data_ptr(); + const float *k = k_tensor.data_ptr(); + const int *index_k = index_k_tensor.data_ptr(); + const float *table_q = table_q_tensor.data_ptr(); + const float *table_k = table_k_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *grad_q = grad_q_tensor.data_ptr(); + float *grad_k = grad_k_tensor.data_ptr(); + float *grad_table_q = grad_table_q_tensor.data_ptr(); + float *grad_table_k = grad_table_k_tensor.data_ptr(); + dot_prod_with_idx_backward_cuda_launcher_v3(N, M, h, hdim, n_max, grad_out, q, index_q_offsets, k, index_k, table_q, table_k, rel_idx, grad_q, grad_k, grad_table_q, grad_table_k); +} + + +void attention_step2_with_rel_pos_value_forward_cuda_v2(int N, int M, int h, int hdim, int n_max, at::Tensor attn_tensor, at::Tensor v_tensor, + at::Tensor index0_offsets_tensor, at::Tensor index1_tensor, at::Tensor table_tensor, at::Tensor rel_idx_tensor, at::Tensor output_tensor) +{ + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const int *index0_offsets = index0_offsets_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + attention_step2_with_rel_pos_value_forward_cuda_launcher_v2(N, M, h, hdim, n_max, attn, v, index0_offsets, index1, table, rel_idx, output); +} + +void attention_step2_with_rel_pos_value_backward_cuda_v2(int N, int M, int h, int hdim, int n_max, at::Tensor grad_out_tensor, + at::Tensor index0_offsets_tensor, at::Tensor index1_tensor, at::Tensor attn_tensor, at::Tensor v_tensor, at::Tensor table_tensor, + at::Tensor rel_idx_tensor, at::Tensor grad_attn_tensor, at::Tensor grad_v_tensor, at::Tensor grad_table_tensor) +{ + const float *grad_out = grad_out_tensor.data_ptr(); + const int *index0_offsets = index0_offsets_tensor.data_ptr(); + const int *index1 = index1_tensor.data_ptr(); + const float *attn = attn_tensor.data_ptr(); + const float *v = v_tensor.data_ptr(); + const float *table = table_tensor.data_ptr(); + const int *rel_idx = rel_idx_tensor.data_ptr(); + float *grad_attn = grad_attn_tensor.data_ptr(); + float *grad_v = grad_v_tensor.data_ptr(); + float *grad_table = grad_table_tensor.data_ptr(); + attention_step2_with_rel_pos_value_backward_cuda_launcher_v2(N, M, h, hdim, n_max, grad_out, index0_offsets, index1, attn, v, table, rel_idx, grad_attn, grad_v, grad_table); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7b2622ae6c4ed8c2f361a555d1c4b5b9ee6a2db7 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda.cpp @@ -0,0 +1,16 @@ +#include +#include +#include +#include +#include "sampling_cuda_kernel.h" + + +void furthestsampling_cuda(int b, int n, at::Tensor xyz_tensor, at::Tensor offset_tensor, at::Tensor new_offset_tensor, at::Tensor 
tmp_tensor, at::Tensor idx_tensor) +{ + const float *xyz = xyz_tensor.data_ptr<float>(); + const int *offset = offset_tensor.data_ptr<int>(); + const int *new_offset = new_offset_tensor.data_ptr<int>(); + float *tmp = tmp_tensor.data_ptr<float>(); + int *idx = idx_tensor.data_ptr<int>(); + furthestsampling_cuda_launcher(b, n, xyz, offset, new_offset, tmp, idx); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..d2c70b5c9e6374e4b52fa9f9327d6cae9337d17e --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.cu @@ -0,0 +1,171 @@ +#include "../cuda_utils.h" +#include "sampling_cuda_kernel.h" + + +__device__ void __update(float *dists, int *dists_i, int idx1, int idx2) { + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? i2 : i1; +} + +// input xyz: (n, 3), tmp: (b, n_max) +// output idx (m) +template <unsigned int block_size> +__global__ void furthestsampling_cuda_kernel(const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) +{ + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int bid = blockIdx.x; + int start_n, end_n, start_m, end_m, old; + if (bid == 0) { + start_n = 0; + end_n = offset[0]; + start_m = 0; + end_m = new_offset[0]; + old = 0; + } + else { + start_n = offset[bid - 1]; + end_n = offset[bid]; + start_m = new_offset[bid - 1]; + end_m = new_offset[bid]; + old = offset[bid - 1]; + } + + const int stride = block_size; + int tid = threadIdx.x; + if (tid == 0) idx[start_m] = start_n; + + __syncthreads(); + for (int j = start_m + 1; j < end_m; j++) + { + int besti = start_n; + float best = -1; + float x1 = xyz[old * 3 + 0]; + float y1 = xyz[old * 3 + 1]; + float z1 = xyz[old * 3 + 2]; + for (int k = start_n + tid; k < end_n; k += stride) + { + float x2 = xyz[k * 3 + 0]; + float y2 = xyz[k * 3 + 1]; + float z2 = xyz[k * 3 + 2]; + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = min(d, tmp[k]); + tmp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ?
d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idx[j] = old; + } +} + +void furthestsampling_cuda_launcher(int b, int n, const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) +{ + unsigned int n_threads = opt_n_threads(n); + switch (n_threads) { + case 1024: + furthestsampling_cuda_kernel<1024><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 512: + furthestsampling_cuda_kernel<512><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 256: + furthestsampling_cuda_kernel<256><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 128: + furthestsampling_cuda_kernel<128><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 64: + furthestsampling_cuda_kernel<64><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 32: + furthestsampling_cuda_kernel<32><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 16: + furthestsampling_cuda_kernel<16><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 8: + furthestsampling_cuda_kernel<8><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 4: + furthestsampling_cuda_kernel<4><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 2: + furthestsampling_cuda_kernel<2><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + case 1: + furthestsampling_cuda_kernel<1><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + break; + default: + furthestsampling_cuda_kernel<512><<<b, n_threads>>>(xyz, offset, new_offset, tmp, idx); + } +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c903f638eb30bbf5bf01141ed2740cc0cd37452e --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/sampling/sampling_cuda_kernel.h @@ -0,0 +1,18 @@ +#ifndef _SAMPLING_CUDA_KERNEL +#define _SAMPLING_CUDA_KERNEL +#include <vector> +#include <torch/serialize/tensor.h> +#include <ATen/cuda/CUDAContext.h> + +void furthestsampling_cuda(int b, int n, at::Tensor xyz_tensor, at::Tensor offset_tensor, at::Tensor new_offset_tensor, at::Tensor tmp_tensor, at::Tensor idx_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void furthestsampling_cuda_launcher(int b, int n, const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx); + +#ifdef __cplusplus +} +#endif +#endif diff --git
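The kernel above runs one block per batch and reduces the per-thread farthest candidates in shared memory. A minimal NumPy sketch of the same farthest-point-sampling logic, using the same offset/new_offset batching convention, can help when sanity-checking results on small clouds; this is an illustrative re-implementation, not part of the extension.

```
import numpy as np

def furthest_point_sampling_ref(xyz, offset, new_offset):
    # xyz: (n, 3); offset / new_offset: cumulative point / sample counts per batch.
    idx = np.zeros(new_offset[-1], dtype=np.int64)
    start_n, start_m = 0, 0
    for b in range(len(offset)):
        end_n, end_m = offset[b], new_offset[b]
        pts = xyz[start_n:end_n]
        dist = np.full(len(pts), np.inf)   # running min distance to the chosen set
        cur = 0                            # the first sample is the first point
        idx[start_m] = start_n
        for j in range(start_m + 1, end_m):
            dist = np.minimum(dist, ((pts - pts[cur]) ** 2).sum(1))
            cur = int(dist.argmax())       # farthest remaining point
            idx[j] = start_n + cur
        start_n, start_m = end_n, end_m
    return idx
```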
a/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda.cpp b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fa38dc5697312dfe6111931f2d6abcde7c8f0e77 --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda.cpp @@ -0,0 +1,24 @@ +#include +#include +#include +#include +#include "subtraction_cuda_kernel.h" + + +void subtraction_forward_cuda(int n, int nsample, int c, at::Tensor input1_tensor, at::Tensor input2_tensor, at::Tensor idx_tensor, at::Tensor output_tensor) +{ + const float *input1 = input1_tensor.data_ptr(); + const float *input2 = input2_tensor.data_ptr(); + const int *idx = idx_tensor.data_ptr(); + float *output = output_tensor.data_ptr(); + subtraction_forward_cuda_launcher(n, nsample, c, input1, input2, idx, output); +} + +void subtraction_backward_cuda(int n, int nsample, int c, at::Tensor idx_tensor, at::Tensor grad_output_tensor, at::Tensor grad_input1_tensor, at::Tensor grad_input2_tensor) +{ + const int *idx = idx_tensor.data_ptr(); + const float *grad_output = grad_output_tensor.data_ptr(); + float *grad_input1 = grad_input1_tensor.data_ptr(); + float *grad_input2 = grad_input2_tensor.data_ptr(); + subtraction_backward_cuda_launcher(n, nsample, c, idx, grad_output, grad_input1, grad_input2); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.cu b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..9b8d4f752940d580ee2b49f1b2946a8d6386d11a --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.cu @@ -0,0 +1,44 @@ +#include "../cuda_utils.h" +#include "subtraction_cuda_kernel.h" + + +__global__ void subtraction_forward_cuda_kernel(int n, int nsample, int c, const float *input1, const float *input2, const int *idx, float *output) { + // input: input1: (n, c), input2: (n, c), idx: (n, nsample), output: (n, nsample, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * nsample * c) return; + const int c_idx = index % c; + const int nsample_idx = (index / c) % nsample; + const int n_idx = index / nsample / c; + const int idx_idx = n_idx * nsample + nsample_idx; + const int input1_idx = n_idx * c + c_idx; + const int input2_idx = idx[idx_idx] * c + c_idx; + output[index] = input1[input1_idx] - input2[input2_idx]; +} + +__global__ void subtraction_backward_cuda_kernel(int n, int nsample, int c, const int *idx, const float *grad_output, float *grad_input1, float *grad_input2) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= n * nsample * c) return; + const int c_idx = index % c; + const int nsample_idx = (index / c) % nsample; + const int n_idx = index / nsample / c; + const int idx_idx = n_idx * nsample + nsample_idx; + const int input1_idx = n_idx * c + c_idx; + const int input2_idx = idx[idx_idx] * c + c_idx; + atomicAdd(grad_input1 + input1_idx, grad_output[index]); + atomicAdd(grad_input2 + input2_idx, -grad_output[index]); +} + +void subtraction_forward_cuda_launcher(int n, int nsample, int c, const float *input1, const float *input2, const int *idx, float *output) { + // input: input1: (n, c), input2: (n, c), idx: (n, nsample), output: (n, nsample, c) + dim3 blocks(DIVUP(n * nsample * c, THREADS_PER_BLOCK)); + dim3 
threads(THREADS_PER_BLOCK); + subtraction_forward_cuda_kernel<<>>(n, nsample, c, input1, input2, idx, output); +} + +void subtraction_backward_cuda_launcher(int n, int nsample, int c, const int *idx, const float *grad_output, float *grad_input1, float *grad_input2) { + // input: grad_output: (n, nsample, c), output: grad_input1: (n, c), grad_input2: (n, c) + dim3 blocks(DIVUP(n * nsample * c, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + subtraction_backward_cuda_kernel<<>>(n, nsample, c, idx, grad_output, grad_input1, grad_input2); +} diff --git a/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.h b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..856133d97bdd3dc58f29c746ff240fc9d489c22e --- /dev/null +++ b/models/Mask3D/mask3d/utils/pointops2/src/subtraction/subtraction_cuda_kernel.h @@ -0,0 +1,20 @@ +#ifndef _SUBTRACTION_CUDA_KERNEL +#define _SUBTRACTION_CUDA_KERNEL +#include +#include +#include + +void subtraction_forward_cuda(int n, int nsample, int c, at::Tensor input1_tensor, at::Tensor input2_tensor, at::Tensor idx_tensor, at::Tensor output_tensor); +void subtraction_backward_cuda(int n, int nsample, int c, at::Tensor idx_tensor, at::Tensor grad_output_tensor, at::Tensor grad_input1_tensor, at::Tensor grad_input2_tensor); + +#ifdef __cplusplus +extern "C" { +#endif + +void subtraction_forward_cuda_launcher(int n, int nsample, int c, const float *input1, const float *input2, const int *idx, float *output); +void subtraction_backward_cuda_launcher(int n, int nsample, int c, const int *idx, const float *grad_output, float *grad_input1, float *grad_input2); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/models/Mask3D/mask3d/utils/utils.py b/models/Mask3D/mask3d/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b7bb374e072bc893a6b2e68aa390478cad63671a --- /dev/null +++ b/models/Mask3D/mask3d/utils/utils.py @@ -0,0 +1,133 @@ +import sys + +if sys.version_info[:2] >= (3, 8): + from collections.abc import MutableMapping +else: + from collections import MutableMapping + +import torch +from loguru import logger + + +def flatten_dict(d, parent_key="", sep="_"): + """ + https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys + """ + items = [] + for k, v in d.items(): + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, MutableMapping): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def load_baseline_model(cfg, model): + # if it is Minkoski weights + cfg.model.in_channels = 3 + cfg.model.config.conv1_kernel_size = 5 + cfg.data.add_normals = False + cfg.data.train_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.validation_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.test_dataset.color_mean_std = [(0.5, 0.5, 0.5), (1, 1, 1)] + cfg.data.voxel_size = 0.02 + model = model(cfg) + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + model.model.load_state_dict(state_dict) + return cfg, model + + +def load_backbone_checkpoint_with_missing_or_exsessive_keys(cfg, model): + state_dict = torch.load(cfg.general.backbone_checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + + # if parametrs not found in checkpoint they will be randomly initialized + for key in state_dict.keys(): + if correct_dict.pop(f"model.backbone.{key}", None) is None: + 
logger.warning( + f"Key not found, it will be initialized randomly: {key}" + ) + + # if parametrs have different shape, it will randomly initialize + state_dict = torch.load(cfg.general.backbone_checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + for key in correct_dict.keys(): + if key.replace("model.backbone.", "") not in state_dict: + logger.warning(f"{key} not in loaded checkpoint") + state_dict.update( + {key.replace("model.backbone.", ""): correct_dict[key]} + ) + elif ( + state_dict[key.replace("model.backbone.", "")].shape + != correct_dict[key].shape + ): + logger.warning( + f"incorrect shape {key}:{state_dict[key.replace('model.backbone.', '')].shape} vs {correct_dict[key].shape}" + ) + state_dict.update({key: correct_dict[key]}) + + # if we have more keys just discard them + correct_dict = dict(model.state_dict()) + new_state_dict = dict() + for key in state_dict.keys(): + if f"model.backbone.{key}" in correct_dict.keys(): + new_state_dict.update({f"model.backbone.{key}": state_dict[key]}) + elif key in correct_dict.keys(): + new_state_dict.update({key: correct_dict[key]}) + else: + logger.warning(f"excessive key: {key}") + model.load_state_dict(new_state_dict) + return cfg, model + + +def load_checkpoint_with_missing_or_exsessive_keys(cfg, model): + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + + # if parametrs not found in checkpoint they will be randomly initialized + for key in state_dict.keys(): + if correct_dict.pop(key, None) is None: + logger.warning( + f"Key not found, it will be initialized randomly: {key}" + ) + + # if parametrs have different shape, it will randomly initialize + state_dict = torch.load(cfg.general.checkpoint)["state_dict"] + correct_dict = dict(model.state_dict()) + for key in correct_dict.keys(): + if key not in state_dict: + logger.warning(f"{key} not in loaded checkpoint") + state_dict.update({key: correct_dict[key]}) + elif state_dict[key].shape != correct_dict[key].shape: + logger.warning( + f"incorrect shape {key}:{state_dict[key].shape} vs {correct_dict[key].shape}" + ) + state_dict.update({key: correct_dict[key]}) + + # if we have more keys just discard them + correct_dict = dict(model.state_dict()) + new_state_dict = dict() + for key in state_dict.keys(): + if key in correct_dict.keys(): + new_state_dict.update({key: state_dict[key]}) + else: + logger.warning(f"excessive key: {key}") + model.load_state_dict(new_state_dict) + return cfg, model + + +def freeze_until(net, param_name: str = None): + """ + Freeze net until param_name + https://opendatascience.slack.com/archives/CGK4KQBHD/p1588373239292300?thread_ts=1588105223.275700&cid=CGK4KQBHD + Args: + net: + param_name: + Returns: + """ + found_name = False + for name, params in net.named_parameters(): + if name == param_name: + found_name = True + params.requires_grad = found_name diff --git a/models/Mask3D/mask3d/utils/votenet_utils/box_util.py b/models/Mask3D/mask3d/utils/votenet_utils/box_util.py new file mode 100644 index 0000000000000000000000000000000000000000..3c5a56e775ea5d44c9b425f3fab1ba353c452d56 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/box_util.py @@ -0,0 +1,330 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" Helper functions for calculating 2D and 3D bounding box IoU. + +Collected and written by Charles R. 
Qi +Last modified: Jul 2019 +""" +from __future__ import print_function + +import numpy as np +from scipy.spatial import ConvexHull + + +def polygon_clip(subjectPolygon, clipPolygon): + """Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. + """ + + def inside(p): + return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * ( + p[0] - cp1[0] + ) + + def computeIntersection(): + dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]] + dp = [s[0] - e[0], s[1] - e[1]] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3] + + outputList = subjectPolygon + cp1 = clipPolygon[-1] + + for clipVertex in clipPolygon: + cp2 = clipVertex + inputList = outputList + outputList = [] + s = inputList[-1] + + for subjectVertex in inputList: + e = subjectVertex + if inside(e): + if not inside(s): + outputList.append(computeIntersection()) + outputList.append(e) + elif inside(s): + outputList.append(computeIntersection()) + s = e + cp1 = cp2 + if len(outputList) == 0: + return None + return outputList + + +def poly_area(x, y): + """Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates""" + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def convex_hull_intersection(p1, p2): + """Compute area of two convex hull's intersection area. + p1,p2 are a list of (x,y) tuples of hull vertices. + return a list of (x,y) for the intersection and its volume + """ + inter_p = polygon_clip(p1, p2) + if inter_p is not None: + hull_inter = ConvexHull(inter_p) + return inter_p, hull_inter.volume + else: + return None, 0.0 + + +def box3d_vol(corners): + """corners: (8,3) no assumption on axis direction""" + a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2)) + b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2)) + c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2)) + return a * b * c + + +def is_clockwise(p): + x = p[:, 0] + y = p[:, 1] + return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0 + + +def box3d_iou(corners1, corners2): + """Compute 3D bounding box IoU. + + Input: + corners1: numpy array (8,3), assume up direction is negative Y + corners2: numpy array (8,3), assume up direction is negative Y + Output: + iou: 3D bounding box IoU + iou_2d: bird's eye view 2D bounding box IoU + + todo (rqi): add more description on corner points' orders. 
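polygon_clip implements Sutherland-Hodgman clipping, and together with poly_area it provides the bird's-eye-view intersection used by box3d_iou. A quick usage example with two counter-clockwise squares:

```
import numpy as np

subject = [(0, 0), (2, 0), (2, 2), (0, 2)]   # CCW square
clip = [(1, 1), (3, 1), (3, 3), (1, 3)]      # CCW, convex clip polygon
inter = np.array(polygon_clip(subject, clip))
print(poly_area(inter[:, 0], inter[:, 1]))   # 1.0: the shared [1,2]x[1,2] patch
```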
+ """ + # corner points are in counter clockwise order + rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)] + rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)] + area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1]) + area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1]) + inter, inter_area = convex_hull_intersection(rect1, rect2) + iou_2d = inter_area / (area1 + area2 - inter_area) + ymax = min(corners1[0, 1], corners2[0, 1]) + ymin = max(corners1[4, 1], corners2[4, 1]) + inter_vol = inter_area * max(0.0, ymax - ymin) + vol1 = box3d_vol(corners1) + vol2 = box3d_vol(corners2) + iou = inter_vol / (vol1 + vol2 - inter_vol) + return iou, iou_2d + + +def get_iou(bb1, bb2): + """ + Calculate the Intersection over Union (IoU) of two 2D bounding boxes. + + Parameters + ---------- + bb1 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x1, y1) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + bb2 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x, y) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + + Returns + ------- + float + in [0, 1] + """ + assert bb1["x1"] < bb1["x2"] + assert bb1["y1"] < bb1["y2"] + assert bb2["x1"] < bb2["x2"] + assert bb2["y1"] < bb2["y2"] + + # determine the coordinates of the intersection rectangle + x_left = max(bb1["x1"], bb2["x1"]) + y_top = max(bb1["y1"], bb2["y1"]) + x_right = min(bb1["x2"], bb2["x2"]) + y_bottom = min(bb1["y2"], bb2["y2"]) + + if x_right < x_left or y_bottom < y_top: + return 0.0 + + # The intersection of two axis-aligned bounding boxes is always an + # axis-aligned bounding box + intersection_area = (x_right - x_left) * (y_bottom - y_top) + + # compute the area of both AABBs + bb1_area = (bb1["x2"] - bb1["x1"]) * (bb1["y2"] - bb1["y1"]) + bb2_area = (bb2["x2"] - bb2["x1"]) * (bb2["y2"] - bb2["y1"]) + + # compute the intersection over union by taking the intersection + # area and dividing it by the sum of prediction + ground-truth + # areas - the interesection area + iou = intersection_area / float(bb1_area + bb2_area - intersection_area) + assert iou >= 0.0 + assert iou <= 1.0 + return iou + + +def box2d_iou(box1, box2): + """Compute 2D bounding box IoU. + + Input: + box1: tuple of (xmin,ymin,xmax,ymax) + box2: tuple of (xmin,ymin,xmax,ymax) + Output: + iou: 2D IoU scalar + """ + return get_iou( + {"x1": box1[0], "y1": box1[1], "x2": box1[2], "y2": box1[3]}, + {"x1": box2[0], "y1": box2[1], "x2": box2[2], "y2": box2[3]}, + ) + + +# ----------------------------------------------------------- +# Convert from box parameters to +# ----------------------------------------------------------- +def roty(t): + """Rotation about the y-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) + + +def roty_batch(t): + """Rotation about the y-axis. 
+ t: (x1,x2,...xn) + return: (x1,x2,...,xn,3,3) + """ + input_shape = t.shape + output = np.zeros(tuple(list(input_shape) + [3, 3])) + c = np.cos(t) + s = np.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def get_3d_box(box_size, heading_angle, center): + """box_size is array(l,w,h), heading_angle is radius clockwise from pos x axis, center is xyz of box center + output (8,3) array for 3D box cornders + Similar to utils/compute_orientation_3d + """ + R = roty(heading_angle) + l, w, h = box_size + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners])) + corners_3d[0, :] = corners_3d[0, :] + center[0] + corners_3d[1, :] = corners_3d[1, :] + center[1] + corners_3d[2, :] = corners_3d[2, :] + center[2] + corners_3d = np.transpose(corners_3d) + return corners_3d + + +def get_3d_box_batch(box_size, heading_angle, center): + """box_size: [x1,x2,...,xn,3] + heading_angle: [x1,x2,...,xn] + center: [x1,x2,...,xn,3] + Return: + [x1,x3,...,xn,8,3] + """ + input_shape = heading_angle.shape + R = roty_batch(heading_angle) + l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1] + w = np.expand_dims(box_size[..., 1], -1) + h = np.expand_dims(box_size[..., 2], -1) + corners_3d = np.zeros(tuple(list(input_shape) + [8, 3])) + corners_3d[..., :, 0] = np.concatenate( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1 + ) + corners_3d[..., :, 1] = np.concatenate( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1 + ) + corners_3d[..., :, 2] = np.concatenate( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1 + ) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist))) + corners_3d += np.expand_dims(center, -2) + return corners_3d + + +if __name__ == "__main__": + + # Function for polygon ploting + import matplotlib + from matplotlib.patches import Polygon + from matplotlib.collections import PatchCollection + import matplotlib.pyplot as plt + + def plot_polys(plist, scale=500.0): + fig, ax = plt.subplots() + patches = [] + for p in plist: + poly = Polygon(np.array(p) / scale, True) + patches.append(poly) + + pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5) + colors = 100 * np.random.rand(len(patches)) + pc.set_array(np.array(colors)) + ax.add_collection(pc) + plt.show() + + # Demo on ConvexHull + points = np.random.rand(30, 2) # 30 random points in 2-D + hull = ConvexHull(points) + # **In 2D "volume" is is area, "area" is perimeter + print(("Hull area: ", hull.volume)) + for simplex in hull.simplices: + print(simplex) + + # Demo on convex hull overlaps + sub_poly = [(0, 0), (300, 0), (300, 300), (0, 300)] + clip_poly = [(150, 150), (300, 300), (150, 450), (0, 300)] + inter_poly = polygon_clip(sub_poly, clip_poly) + print(poly_area(np.array(inter_poly)[:, 0], np.array(inter_poly)[:, 1])) + + # Test convex hull interaction function + rect1 = [(50, 0), (50, 300), (300, 300), (300, 0)] + rect2 = [(150, 150), (300, 300), (150, 450), (0, 300)] + plot_polys([rect1, rect2]) + inter, area = convex_hull_intersection(rect1, rect2) + print((inter, area)) + if inter is not None: + print(poly_area(np.array(inter)[:, 
0], np.array(inter)[:, 1])) + + print("------------------") + rect1 = [ + (0.30026005199835404, 8.9408694211408424), + (-1.1571105364358421, 9.4686676477075533), + (0.1777082043006144, 13.154404877812102), + (1.6350787927348105, 12.626606651245391), + ] + rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]] + rect2 = [ + (0.23908745901608636, 8.8551095691132886), + (-1.2771419487733995, 9.4269062966181956), + (0.13138836963152717, 13.161896351296868), + (1.647617777421013, 12.590099623791961), + ] + rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]] + plot_polys([rect1, rect2]) + inter, area = convex_hull_intersection(rect1, rect2) + print((inter, area)) diff --git a/models/Mask3D/mask3d/utils/votenet_utils/eval_det.py b/models/Mask3D/mask3d/utils/votenet_utils/eval_det.py new file mode 100644 index 0000000000000000000000000000000000000000..77f4f73bfd428cd31c7de9b78286ddb6d40473ff --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/eval_det.py @@ -0,0 +1,310 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" Generic Code for Object Detection Evaluation + + Input: + For each class: + For each image: + Predictions: box, score + Groundtruths: box + + Output: + For each class: + precision-recal and average precision + + Author: Charles R. Qi + + Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py +""" +import numpy as np + + +def voc_ap(rec, prec, use_07_metric=False): + """ap = voc_ap(rec, prec, [use_07_metric]) + Compute VOC AP given precision and recall. + If use_07_metric is true, uses the + VOC 07 11 point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +import os +import sys + +# BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +from utils.votenet_utils.metric_util import calc_iou # axis-aligned 3D box IoU + + +def get_iou(bb1, bb2): + """Compute IoU of two bounding boxes. + ** Define your bod IoU function HERE ** + """ + # pass + iou3d = calc_iou(bb1, bb2) + return iou3d + + +from box_util import box3d_iou + + +def get_iou_obb(bb1, bb2): + iou3d, iou2d = box3d_iou(bb1, bb2) + return iou3d + + +def get_iou_main(get_iou_func, args): + return get_iou_func(*args) + + +def eval_det_cls( + pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou +): + """Generic functions to compute precision/recall for object detection + for a single class. 
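voc_ap integrates the precision envelope over recall. As a small worked example, three detections ranked by confidence with outcomes TP, FP, TP against two ground-truth boxes yield the cumulative precision/recall below and an AP of 0.5 * 1.0 + 0.5 * 2/3 ≈ 0.833:

```
import numpy as np

rec = np.array([0.5, 0.5, 1.0])         # cumulative TP / npos
prec = np.array([1.0, 0.5, 2.0 / 3.0])  # cumulative TP / (TP + FP)
print(voc_ap(rec, prec))                # ~0.833
```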
+ Input: + pred: map of {img_id: [(bbox, score)]} where bbox is numpy array + gt: map of {img_id: [bbox]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if True use VOC07 11 point method + Output: + rec: numpy array of length nd + prec: numpy array of length nd + ap: scalar, average precision + """ + + # construct gt objects + class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}} + npos = 0 + for img_id in gt.keys(): + bbox = np.array(gt[img_id]) + det = [False] * len(bbox) + npos += len(bbox) + class_recs[img_id] = {"bbox": bbox, "det": det} + # pad empty list to all other imgids + for img_id in pred.keys(): + if img_id not in gt: + class_recs[img_id] = {"bbox": np.array([]), "det": []} + + # construct dets + image_ids = [] + confidence = [] + BB = [] + for img_id in pred.keys(): + for box, score in pred[img_id]: + image_ids.append(img_id) + confidence.append(score) + BB.append(box) + confidence = np.array(confidence) + BB = np.array(BB) # (nd,4 or 8,3 or 6) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + sorted_scores = np.sort(-confidence) + BB = BB[sorted_ind, ...] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + # if d%100==0: print(d) + R = class_recs[image_ids[d]] + bb = BB[d, ...].astype(float) + ovmax = -np.inf + BBGT = R["bbox"].astype(float) + + if BBGT.size > 0: + # compute overlaps + for j in range(BBGT.shape[0]): + iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...])) + if iou > ovmax: + ovmax = iou + jmax = j + + # print d, ovmax + if ovmax > ovthresh: + if not R["det"][jmax]: + tp[d] = 1.0 + R["det"][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) + # print('NPOS: ', npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap + + +def eval_det_cls_wrapper(arguments): + pred, gt, ovthresh, use_07_metric, get_iou_func = arguments + rec, prec, ap = eval_det_cls( + pred, gt, ovthresh, use_07_metric, get_iou_func + ) + return (rec, prec, ap) + + +def eval_det( + pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou +): + """Generic functions to compute precision/recall for object detection + for multiple classes. 
+ Input: + pred_all: map of {img_id: [(classname, bbox, score)]} + gt_all: map of {img_id: [(classname, bbox)]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if true use VOC07 11 point method + Output: + rec: {classname: rec} + prec: {classname: prec_all} + ap: {classname: scalar} + """ + pred = {} # map {classname: pred} + gt = {} # map {classname: gt} + for img_id in pred_all.keys(): + for classname, bbox, score in pred_all[img_id]: + if classname not in pred: + pred[classname] = {} + if img_id not in pred[classname]: + pred[classname][img_id] = [] + if classname not in gt: + gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + pred[classname][img_id].append((bbox, score)) + for img_id in gt_all.keys(): + for classname, bbox in gt_all[img_id]: + if classname not in gt: + gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + # JONAS ADAPTATION TODO + if classname not in pred: + pred[classname] = {} + if img_id not in pred[classname]: + pred[classname][img_id] = [] + # ===================== + gt[classname][img_id].append(bbox) + + rec = {} + prec = {} + ap = {} + for classname in gt.keys(): + print("Computing AP for class: ", classname) + rec[classname], prec[classname], ap[classname] = eval_det_cls( + pred[classname], + gt[classname], + ovthresh, + use_07_metric, + get_iou_func, + ) + print(classname, ap[classname]) + + return rec, prec, ap + + +from multiprocessing import Pool + + +def eval_det_multiprocessing( + pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou +): + """Generic functions to compute precision/recall for object detection + for multiple classes. + Input: + pred_all: map of {img_id: [(classname, bbox, score)]} + gt_all: map of {img_id: [(classname, bbox)]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if true use VOC07 11 point method + Output: + rec: {classname: rec} + prec: {classname: prec_all} + ap: {classname: scalar} + """ + pred = {} # map {classname: pred} + gt = {} # map {classname: gt} + for img_id in pred_all.keys(): + for classname, bbox, score in pred_all[img_id]: + if classname not in pred: + pred[classname] = {} + if img_id not in pred[classname]: + pred[classname][img_id] = [] + if classname not in gt: + gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + pred[classname][img_id].append((bbox, score)) + for img_id in gt_all.keys(): + for classname, bbox in gt_all[img_id]: + if classname not in gt: + gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + gt[classname][img_id].append(bbox) + + rec = {} + prec = {} + ap = {} + p = Pool(processes=10) + ret_values = p.map( + eval_det_cls_wrapper, + [ + ( + pred[classname], + gt[classname], + ovthresh, + use_07_metric, + get_iou_func, + ) + for classname in gt.keys() + if classname in pred + ], + ) + p.close() + for i, classname in enumerate(gt.keys()): + if classname in pred: + rec[classname], prec[classname], ap[classname] = ret_values[i] + else: + rec[classname] = 0 + prec[classname] = 0 + ap[classname] = 0 + print(classname, ap[classname]) + + return rec, prec, ap diff --git a/models/Mask3D/mask3d/utils/votenet_utils/metric_util.py b/models/Mask3D/mask3d/utils/votenet_utils/metric_util.py new file mode 100644 index 0000000000000000000000000000000000000000..312589a5e0fa96e2153026a2fa4bd9053b890f54 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/metric_util.py @@ -0,0 +1,194 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
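A minimal usage sketch for eval_det with the default axis-aligned IoU, where boxes are (cx, cy, cz, dx, dy, dz) keyed by scene id and class name; the inputs here are made up for illustration:

```
import numpy as np

pred_all = {"scene0": [("chair", np.array([0, 0, 0, 1, 1, 1]), 0.9)]}
gt_all = {"scene0": [("chair", np.array([0, 0, 0, 1, 1, 1]))]}
rec, prec, ap = eval_det(pred_all, gt_all, ovthresh=0.25)
print(ap["chair"])   # 1.0 for a perfect match
```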
+# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" Utility functions for metric evaluation. + +Author: Or Litany and Charles R. Qi +""" + +import os +import sys +import torch + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +import numpy as np + +# Mesh IO +import trimesh + + +# ---------------------------------------- +# Precision and Recall +# ---------------------------------------- + + +def multi_scene_precision_recall( + labels, pred, iou_thresh, conf_thresh, label_mask, pred_mask=None +): + """ + Args: + labels: (B, N, 6) + pred: (B, M, 6) + iou_thresh: scalar + conf_thresh: scalar + label_mask: (B, N,) with values in 0 or 1 to indicate which GT boxes to consider. + pred_mask: (B, M,) with values in 0 or 1 to indicate which PRED boxes to consider. + Returns: + TP,FP,FN,Precision,Recall + """ + # Make sure the masks are not Torch tensor, otherwise the mask==1 returns uint8 array instead + # of True/False array as in numpy + assert not torch.is_tensor(label_mask) + assert not torch.is_tensor(pred_mask) + TP, FP, FN = 0, 0, 0 + if label_mask is None: + label_mask = np.ones((labels.shape[0], labels.shape[1])) + if pred_mask is None: + pred_mask = np.ones((pred.shape[0], pred.shape[1])) + for batch_idx in range(labels.shape[0]): + TP_i, FP_i, FN_i = single_scene_precision_recall( + labels[batch_idx, label_mask[batch_idx, :] == 1, :], + pred[batch_idx, pred_mask[batch_idx, :] == 1, :], + iou_thresh, + conf_thresh, + ) + TP += TP_i + FP += FP_i + FN += FN_i + + return TP, FP, FN, precision_recall(TP, FP, FN) + + +def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh): + """Compute P and R for predicted bounding boxes. Ignores classes! + Args: + labels: (N x bbox) ground-truth bounding boxes (6 dims) + pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification + Returns: + TP, FP, FN + """ + + # for each pred box with high conf (C), compute IoU with all gt boxes. + # TP = number of times IoU > th ; FP = C - TP + # FN - number of scene objects without good match + + gt_bboxes = labels[:, :6] + + num_scene_bboxes = gt_bboxes.shape[0] + conf = pred[:, 6] + + conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6] + num_conf_pred_bboxes = conf_pred_bbox.shape[0] + + # init an array to keep iou between generated and scene bboxes + iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes]) + for g_idx in range(num_conf_pred_bboxes): + for s_idx in range(num_scene_bboxes): + iou_arr[g_idx, s_idx] = calc_iou( + conf_pred_bbox[g_idx, :], gt_bboxes[s_idx, :] + ) + + good_match_arr = iou_arr >= iou_thresh + + TP = good_match_arr.any(axis=1).sum() + FP = num_conf_pred_bboxes - TP + FN = num_scene_bboxes - good_match_arr.any(axis=0).sum() + + return TP, FP, FN + + +def precision_recall(TP, FP, FN): + Prec = 1.0 * TP / (TP + FP) if TP + FP > 0 else 0 + Rec = 1.0 * TP / (TP + FN) + return Prec, Rec + + +def calc_iou(box_a, box_b): + """Computes IoU of two axis aligned bboxes. 
+ Args: + box_a, box_b: 6D of center and lengths + Returns: + iou + """ + + max_a = box_a[0:3] + box_a[3:6] / 2 + max_b = box_b[0:3] + box_b[3:6] / 2 + min_max = np.array([max_a, max_b]).min(0) + + min_a = box_a[0:3] - box_a[3:6] / 2 + min_b = box_b[0:3] - box_b[3:6] / 2 + max_min = np.array([min_a, min_b]).max(0) + if not ((min_max > max_min).all()): + return 0.0 + + intersection = (min_max - max_min).prod() + vol_a = box_a[3:6].prod() + vol_b = box_b[3:6].prod() + union = vol_a + vol_b - intersection + return 1.0 * intersection / union + + +if __name__ == "__main__": + print("running some tests") + + ############ + ## Test IoU + ############ + box_a = np.array([0, 0, 0, 1, 1, 1]) + box_b = np.array([0, 0, 0, 2, 2, 2]) + expected_iou = 1.0 / 8 + pred_iou = calc_iou(box_a, box_b) + assert expected_iou == pred_iou, "function returned wrong IoU" + + box_a = np.array([0, 0, 0, 1, 1, 1]) + box_b = np.array([10, 10, 10, 2, 2, 2]) + expected_iou = 0.0 + pred_iou = calc_iou(box_a, box_b) + assert expected_iou == pred_iou, "function returned wrong IoU" + + print("IoU test -- PASSED") + + ######################### + ## Test Precition Recall + ######################### + gt_boxes = np.array([[0, 0, 0, 1, 1, 1], [3, 0, 1, 1, 10, 1]]) + detected_boxes = np.array( + [[0, 0, 0, 1, 1, 1, 1.0], [3, 0, 1, 1, 10, 1, 0.9]] + ) + TP, FP, FN = single_scene_precision_recall( + gt_boxes, detected_boxes, 0.5, 0.5 + ) + assert TP == 2 and FP == 0 and FN == 0 + assert precision_recall(TP, FP, FN) == (1, 1) + + detected_boxes = np.array([[0, 0, 0, 1, 1, 1, 1.0]]) + TP, FP, FN = single_scene_precision_recall( + gt_boxes, detected_boxes, 0.5, 0.5 + ) + assert TP == 1 and FP == 0 and FN == 1 + assert precision_recall(TP, FP, FN) == (1, 0.5) + + detected_boxes = np.array( + [[0, 0, 0, 1, 1, 1, 1.0], [-1, -1, 0, 0.1, 0.1, 1, 1.0]] + ) + TP, FP, FN = single_scene_precision_recall( + gt_boxes, detected_boxes, 0.5, 0.5 + ) + assert TP == 1 and FP == 1 and FN == 1 + assert precision_recall(TP, FP, FN) == (0.5, 0.5) + + # wrong box has low confidence + detected_boxes = np.array( + [[0, 0, 0, 1, 1, 1, 1.0], [-1, -1, 0, 0.1, 0.1, 1, 0.1]] + ) + TP, FP, FN = single_scene_precision_recall( + gt_boxes, detected_boxes, 0.5, 0.5 + ) + assert TP == 1 and FP == 0 and FN == 1 + assert precision_recall(TP, FP, FN) == (1, 0.5) + + print("Precition Recall test -- PASSED") diff --git a/models/Mask3D/mask3d/utils/votenet_utils/nms.py b/models/Mask3D/mask3d/utils/votenet_utils/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad74f846ce16396190a6772546d7c0785c308a2 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/nms.py @@ -0,0 +1,195 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
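calc_iou above takes boxes parameterized as (cx, cy, cz, dx, dy, dz) rather than min/max corners; below is a small conversion helper (hypothetical, for illustration only) plus a worked example.

```
import numpy as np

def corners_to_center_size(bmin, bmax):
    # illustrative helper: (min corner, max corner) -> (cx, cy, cz, dx, dy, dz)
    bmin, bmax = np.asarray(bmin, float), np.asarray(bmax, float)
    return np.concatenate([(bmin + bmax) / 2.0, bmax - bmin])

box_a = corners_to_center_size([0, 0, 0], [1, 1, 1])
box_b = corners_to_center_size([0.5, 0, 0], [1.5, 1, 1])
print(calc_iou(box_a, box_b))   # 0.5 / (1 + 1 - 0.5) = 1/3
```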
+ +import numpy as np +from pc_util import bbox_corner_dist_measure + +# boxes are axis aigned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score) +""" Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/ +Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py +""" + + +def nms_2d(boxes, overlap_threshold): + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + score = boxes[:, 4] + area = (x2 - x1) * (y2 - y1) + + I = np.argsort(score) + pick = [] + while I.size != 0: + last = I.size + i = I[-1] + pick.append(i) + suppress = [last - 1] + for pos in range(last - 1): + j = I[pos] + xx1 = max(x1[i], x1[j]) + yy1 = max(y1[i], y1[j]) + xx2 = min(x2[i], x2[j]) + yy2 = min(y2[i], y2[j]) + w = xx2 - xx1 + h = yy2 - yy1 + if w > 0 and h > 0: + o = w * h / area[j] + print("Overlap is", o) + if o > overlap_threshold: + suppress.append(pos) + I = np.delete(I, suppress) + return pick + + +def nms_2d_faster(boxes, overlap_threshold, old_type=False): + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + score = boxes[:, 4] + area = (x2 - x1) * (y2 - y1) + + I = np.argsort(score) + pick = [] + while I.size != 0: + last = I.size + i = I[-1] + pick.append(i) + + xx1 = np.maximum(x1[i], x1[I[: last - 1]]) + yy1 = np.maximum(y1[i], y1[I[: last - 1]]) + xx2 = np.minimum(x2[i], x2[I[: last - 1]]) + yy2 = np.minimum(y2[i], y2[I[: last - 1]]) + + w = np.maximum(0, xx2 - xx1) + h = np.maximum(0, yy2 - yy1) + + if old_type: + o = (w * h) / area[I[: last - 1]] + else: + inter = w * h + o = inter / (area[i] + area[I[: last - 1]] - inter) + + I = np.delete( + I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0])) + ) + + return pick + + +def nms_3d_faster(boxes, overlap_threshold, old_type=False): + x1 = boxes[:, 0] + y1 = boxes[:, 1] + z1 = boxes[:, 2] + x2 = boxes[:, 3] + y2 = boxes[:, 4] + z2 = boxes[:, 5] + score = boxes[:, 6] + area = (x2 - x1) * (y2 - y1) * (z2 - z1) + + I = np.argsort(score) + pick = [] + while I.size != 0: + last = I.size + i = I[-1] + pick.append(i) + + xx1 = np.maximum(x1[i], x1[I[: last - 1]]) + yy1 = np.maximum(y1[i], y1[I[: last - 1]]) + zz1 = np.maximum(z1[i], z1[I[: last - 1]]) + xx2 = np.minimum(x2[i], x2[I[: last - 1]]) + yy2 = np.minimum(y2[i], y2[I[: last - 1]]) + zz2 = np.minimum(z2[i], z2[I[: last - 1]]) + + l = np.maximum(0, xx2 - xx1) + w = np.maximum(0, yy2 - yy1) + h = np.maximum(0, zz2 - zz1) + + if old_type: + o = (l * w * h) / area[I[: last - 1]] + else: + inter = l * w * h + o = inter / (area[i] + area[I[: last - 1]] - inter) + + I = np.delete( + I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0])) + ) + + return pick + + +def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False): + x1 = boxes[:, 0] + y1 = boxes[:, 1] + z1 = boxes[:, 2] + x2 = boxes[:, 3] + y2 = boxes[:, 4] + z2 = boxes[:, 5] + score = boxes[:, 6] + cls = boxes[:, 7] + area = (x2 - x1) * (y2 - y1) * (z2 - z1) + + I = np.argsort(score) + pick = [] + while I.size != 0: + last = I.size + i = I[-1] + pick.append(i) + + xx1 = np.maximum(x1[i], x1[I[: last - 1]]) + yy1 = np.maximum(y1[i], y1[I[: last - 1]]) + zz1 = np.maximum(z1[i], z1[I[: last - 1]]) + xx2 = np.minimum(x2[i], x2[I[: last - 1]]) + yy2 = np.minimum(y2[i], y2[I[: last - 1]]) + zz2 = np.minimum(z2[i], z2[I[: last - 1]]) + cls1 = cls[i] + cls2 = cls[I[: last - 1]] + + l = np.maximum(0, xx2 - xx1) + w = np.maximum(0, yy2 - yy1) + h = np.maximum(0, zz2 - zz1) + + if old_type: + o = (l * w * h) / area[I[: last - 
1]] + else: + inter = l * w * h + o = inter / (area[i] + area[I[: last - 1]] - inter) + o = o * (cls1 == cls2) + + I = np.delete( + I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0])) + ) + + return pick + + +def nms_crnr_dist(boxes, conf, overlap_threshold): + + I = np.argsort(conf) + pick = [] + while I.size != 0: + last = I.size + i = I[-1] + pick.append(i) + + scores = [] + for ind in I[:-1]: + scores.append(bbox_corner_dist_measure(boxes[i, :], boxes[ind, :])) + + I = np.delete( + I, + np.concatenate( + ([last - 1], np.where(np.array(scores) > overlap_threshold)[0]) + ), + ) + + return pick + + +if __name__ == "__main__": + a = np.random.random((100, 5)) + print(nms_2d(a, 0.9)) + print(nms_2d_faster(a, 0.9)) diff --git a/models/Mask3D/mask3d/utils/votenet_utils/nn_distance.py b/models/Mask3D/mask3d/utils/votenet_utils/nn_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..b713cee2be890120cf4430b71309727793879c53 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/nn_distance.py @@ -0,0 +1,96 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" Chamfer distance in Pytorch. +Author: Charles R. Qi +""" + +import torch +import torch.nn as nn +import numpy as np + + +def huber_loss(error, delta=1.0): + """ + Args: + error: Torch tensor (d1,d2,...,dk) + Returns: + loss: Torch tensor (d1,d2,...,dk) + + x = error = pred - gt or dist(pred,gt) + 0.5 * |x|^2 if |x|<=d + 0.5 * d^2 + d * (|x|-d) if |x|>d + Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py + """ + abs_error = torch.abs(error) + # quadratic = torch.min(abs_error, torch.FloatTensor([delta])) + quadratic = torch.clamp(abs_error, max=delta) + linear = abs_error - quadratic + loss = 0.5 * quadratic**2 + delta * linear + return loss + + +def nn_distance(pc1, pc2, l1smooth=False, delta=1.0, l1=False): + """ + Input: + pc1: (B,N,C) torch tensor + pc2: (B,M,C) torch tensor + l1smooth: bool, whether to use l1smooth loss + delta: scalar, the delta used in l1smooth loss + Output: + dist1: (B,N) torch float32 tensor + idx1: (B,N) torch int64 tensor + dist2: (B,M) torch float32 tensor + idx2: (B,M) torch int64 tensor + """ + N = pc1.shape[1] + M = pc2.shape[1] + pc1_expand_tile = pc1.unsqueeze(2).repeat(1, 1, M, 1) + pc2_expand_tile = pc2.unsqueeze(1).repeat(1, N, 1, 1) + pc_diff = pc1_expand_tile - pc2_expand_tile + + if l1smooth: + pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M) + elif l1: + pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M) + else: + pc_dist = torch.sum(pc_diff**2, dim=-1) # (B,N,M) + dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N) + dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M) + return dist1, idx1, dist2, idx2 + + +def demo_nn_distance(): + np.random.seed(0) + pc1arr = np.random.random((1, 5, 3)) + pc2arr = np.random.random((1, 6, 3)) + pc1 = torch.from_numpy(pc1arr.astype(np.float32)) + pc2 = torch.from_numpy(pc2arr.astype(np.float32)) + dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2) + print(dist1) + print(idx1) + dist = np.zeros((5, 6)) + for i in range(5): + for j in range(6): + dist[i, j] = np.sum((pc1arr[0, i, :] - pc2arr[0, j, :]) ** 2) + print(dist) + print("-" * 30) + print("L1smooth dists:") + dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2, True) + print(dist1) + print(idx1) + dist = np.zeros((5, 6)) + for i in range(5): + for j in range(6): + error = 
np.abs(pc1arr[0, i, :] - pc2arr[0, j, :]) + quad = np.minimum(error, 1.0) + linear = error - quad + loss = 0.5 * quad**2 + 1.0 * linear + dist[i, j] = np.sum(loss) + print(dist) + + +if __name__ == "__main__": + demo_nn_distance() diff --git a/models/Mask3D/mask3d/utils/votenet_utils/pc_util.py b/models/Mask3D/mask3d/utils/votenet_utils/pc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..765c064a3da328f7b30f7798512b811f8b9e75a7 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/pc_util.py @@ -0,0 +1,607 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" Utility functions for processing point clouds. + +Author: Charles R. Qi and Or Litany +""" + +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +# Point cloud IO +import numpy as np + +try: + from plyfile import PlyData, PlyElement +except: + print("Please install the module 'plyfile' for PLY i/o, e.g.") + print("pip install plyfile") + sys.exit(-1) + + +# Mesh IO +import trimesh + +import matplotlib.pyplot as pyplot + +# ---------------------------------------- +# Point Cloud Sampling +# ---------------------------------------- + + +def random_sampling(pc, num_sample, replace=None, return_choices=False): + """Input is NxC, output is num_samplexC""" + if replace is None: + replace = pc.shape[0] < num_sample + choices = np.random.choice(pc.shape[0], num_sample, replace=replace) + if return_choices: + return pc[choices], choices + else: + return pc[choices] + + +# ---------------------------------------- +# Point Cloud/Volume Conversions +# ---------------------------------------- + + +def point_cloud_to_volume_batch( + point_clouds, vsize=12, radius=1.0, flatten=True +): + """Input is BxNx3 batch of point cloud + Output is Bx(vsize^3) + """ + vol_list = [] + for b in range(point_clouds.shape[0]): + vol = point_cloud_to_volume( + np.squeeze(point_clouds[b, :, :]), vsize, radius + ) + if flatten: + vol_list.append(vol.flatten()) + else: + vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0)) + if flatten: + return np.vstack(vol_list) + else: + return np.concatenate(vol_list, 0) + + +def point_cloud_to_volume(points, vsize, radius=1.0): + """input is Nx3 points. + output is vsize*vsize*vsize + assumes points are in range [-radius, radius] + """ + vol = np.zeros((vsize, vsize, vsize)) + voxel = 2 * radius / float(vsize) + locations = (points + radius) / voxel + locations = locations.astype(int) + vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0 + return vol + + +def volume_to_point_cloud(vol): + """vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize + return Nx3 numpy array. 
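nn_distance above returns the two directed nearest-neighbor distance maps; a symmetric Chamfer loss is just their means added together, for example:

```
import torch

pc1 = torch.rand(1, 128, 3)
pc2 = torch.rand(1, 256, 3)
dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2)
chamfer = dist1.mean() + dist2.mean()   # symmetric Chamfer distance
print(chamfer.item())
```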
+ """ + vsize = vol.shape[0] + assert vol.shape[1] == vsize and vol.shape[1] == vsize + points = [] + for a in range(vsize): + for b in range(vsize): + for c in range(vsize): + if vol[a, b, c] == 1: + points.append(np.array([a, b, c])) + if len(points) == 0: + return np.zeros((0, 3)) + points = np.vstack(points) + return points + + +def point_cloud_to_volume_v2_batch( + point_clouds, vsize=12, radius=1.0, num_sample=128 +): + """Input is BxNx3 a batch of point cloud + Output is BxVxVxVxnum_samplex3 + Added on Feb 19 + """ + vol_list = [] + for b in range(point_clouds.shape[0]): + vol = point_cloud_to_volume_v2( + point_clouds[b, :, :], vsize, radius, num_sample + ) + vol_list.append(np.expand_dims(vol, 0)) + return np.concatenate(vol_list, 0) + + +def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128): + """input is Nx3 points + output is vsize*vsize*vsize*num_sample*3 + assumes points are in range [-radius, radius] + samples num_sample points in each voxel, if there are less than + num_sample points, replicate the points + Added on Feb 19 + """ + vol = np.zeros((vsize, vsize, vsize, num_sample, 3)) + voxel = 2 * radius / float(vsize) + locations = (points + radius) / voxel + locations = locations.astype(int) + loc2pc = {} + for n in range(points.shape[0]): + loc = tuple(locations[n, :]) + if loc not in loc2pc: + loc2pc[loc] = [] + loc2pc[loc].append(points[n, :]) + + for i in range(vsize): + for j in range(vsize): + for k in range(vsize): + if (i, j, k) not in loc2pc: + vol[i, j, k, :, :] = np.zeros((num_sample, 3)) + else: + pc = loc2pc[(i, j, k)] # a list of (3,) arrays + pc = np.vstack(pc) # kx3 + # Sample/pad to num_sample points + if pc.shape[0] > num_sample: + pc = random_sampling(pc, num_sample, False) + elif pc.shape[0] < num_sample: + pc = np.lib.pad( + pc, ((0, num_sample - pc.shape[0]), (0, 0)), "edge" + ) + # Normalize + pc_center = (np.array([i, j, k]) + 0.5) * voxel - radius + pc = (pc - pc_center) / voxel # shift and scale + vol[i, j, k, :, :] = pc + return vol + + +def point_cloud_to_image_batch( + point_clouds, imgsize, radius=1.0, num_sample=128 +): + """Input is BxNx3 a batch of point cloud + Output is BxIxIxnum_samplex3 + Added on Feb 19 + """ + img_list = [] + for b in range(point_clouds.shape[0]): + img = point_cloud_to_image( + point_clouds[b, :, :], imgsize, radius, num_sample + ) + img_list.append(np.expand_dims(img, 0)) + return np.concatenate(img_list, 0) + + +def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128): + """input is Nx3 points + output is imgsize*imgsize*num_sample*3 + assumes points are in range [-radius, radius] + samples num_sample points in each pixel, if there are less than + num_sample points, replicate the points + Added on Feb 19 + """ + img = np.zeros((imgsize, imgsize, num_sample, 3)) + pixel = 2 * radius / float(imgsize) + locations = (points[:, 0:2] + radius) / pixel # Nx2 + locations = locations.astype(int) + loc2pc = {} + for n in range(points.shape[0]): + loc = tuple(locations[n, :]) + if loc not in loc2pc: + loc2pc[loc] = [] + loc2pc[loc].append(points[n, :]) + for i in range(imgsize): + for j in range(imgsize): + if (i, j) not in loc2pc: + img[i, j, :, :] = np.zeros((num_sample, 3)) + else: + pc = loc2pc[(i, j)] + pc = np.vstack(pc) + if pc.shape[0] > num_sample: + pc = random_sampling(pc, num_sample, False) + elif pc.shape[0] < num_sample: + pc = np.lib.pad( + pc, ((0, num_sample - pc.shape[0]), (0, 0)), "edge" + ) + pc_center = (np.array([i, j]) + 0.5) * pixel - radius + pc[:, 0:2] = (pc[:, 0:2] - 
pc_center) / pixel + img[i, j, :, :] = pc + return img + + +# ---------------------------------------- +# Point cloud IO +# ---------------------------------------- + + +def read_ply(filename): + """read XYZ point cloud from filename PLY file""" + plydata = PlyData.read(filename) + pc = plydata["vertex"].data + pc_array = np.array([[x, y, z] for x, y, z in pc]) + return pc_array + + +def write_ply(points, filename, text=True): + """input: Nx3, write points to filename as PLY format.""" + points = [ + (points[i, 0], points[i, 1], points[i, 2]) + for i in range(points.shape[0]) + ] + vertex = np.array(points, dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")]) + el = PlyElement.describe(vertex, "vertex", comments=["vertices"]) + PlyData([el], text=text).write(filename) + + +def write_ply_color( + points, labels, filename, num_classes=None, colormap=pyplot.cm.jet +): + """Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file""" + labels = labels.astype(int) + N = points.shape[0] + if num_classes is None: + num_classes = np.max(labels) + 1 + else: + assert num_classes > np.max(labels) + + vertex = [] + # colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)] + colors = [colormap(i / float(num_classes)) for i in range(num_classes)] + for i in range(N): + c = colors[labels[i]] + c = [int(x * 255) for x in c] + vertex.append( + (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]) + ) + vertex = np.array( + vertex, + dtype=[ + ("x", "f4"), + ("y", "f4"), + ("z", "f4"), + ("red", "u1"), + ("green", "u1"), + ("blue", "u1"), + ], + ) + + el = PlyElement.describe(vertex, "vertex", comments=["vertices"]) + PlyData([el], text=True).write(filename) + + +def write_ply_rgb(points, colors, out_filename, num_classes=None): + """Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file""" + colors = colors.astype(int) + N = points.shape[0] + fout = open(out_filename, "w") + for i in range(N): + c = colors[i, :] + fout.write( + "v %f %f %f %d %d %d\n" + % (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]) + ) + fout.close() + + +# ---------------------------------------- +# Simple Point cloud and Volume Renderers +# ---------------------------------------- + + +def pyplot_draw_point_cloud(points, output_filename): + """points is a Nx3 numpy array""" + import matplotlib.pyplot as plt + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + ax.scatter(points[:, 0], points[:, 1], points[:, 2]) + ax.set_xlabel("x") + ax.set_ylabel("y") + ax.set_zlabel("z") + # savefig(output_filename) + + +def pyplot_draw_volume(vol, output_filename): + """vol is of size vsize*vsize*vsize + output an image to output_filename + """ + points = volume_to_point_cloud(vol) + pyplot_draw_point_cloud(points, output_filename) + + +# ---------------------------------------- +# Simple Point manipulations +# ---------------------------------------- +def rotate_point_cloud(points, rotation_matrix=None): + """Input: (n,3), Output: (n,3)""" + # Rotate in-place around Z axis. 
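+    # Note: despite the comment above, this does not rotate in place; it returns a
+    # rotated copy together with the rotation matrix that was applied. When no
+    # matrix is given, a random angle about the Z axis is drawn and the cloud is
+    # rotated about its centroid.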
+ if rotation_matrix is None: + rotation_angle = np.random.uniform() * 2 * np.pi + sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle) + rotation_matrix = np.array( + [[cosval, sinval, 0], [-sinval, cosval, 0], [0, 0, 1]] + ) + ctr = points.mean(axis=0) + rotated_data = np.dot(points - ctr, rotation_matrix) + ctr + return rotated_data, rotation_matrix + + +def rotate_pc_along_y(pc, rot_angle): + """Input ps is NxC points with first 3 channels as XYZ + z is facing forward, x is left ward, y is downward + """ + cosval = np.cos(rot_angle) + sinval = np.sin(rot_angle) + rotmat = np.array([[cosval, -sinval], [sinval, cosval]]) + pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat)) + return pc + + +def roty(t): + """Rotation about the y-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) + + +def roty_batch(t): + """Rotation about the y-axis. + t: (x1,x2,...xn) + return: (x1,x2,...,xn,3,3) + """ + input_shape = t.shape + output = np.zeros(tuple(list(input_shape) + [3, 3])) + c = np.cos(t) + s = np.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def rotz(t): + """Rotation about the z-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) + + +# ---------------------------------------- +# BBox +# ---------------------------------------- +def bbox_corner_dist_measure(crnr1, crnr2): + """compute distance between box corners to replace iou + Args: + crnr1, crnr2: Nx3 points of box corners in camera axis (y points down) + output is a scalar between 0 and 1 + """ + + dist = sys.maxsize + for y in range(4): + rows = [(x + y) % 4 for x in range(4)] + [ + 4 + (x + y) % 4 for x in range(4) + ] + d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0 + if d_ < dist: + dist = d_ + + u = sum([np.linalg.norm(x[0, :] - x[6, :]) for x in [crnr1, crnr2]]) / 2.0 + + measure = max(1.0 - dist / u, 0) + print(measure) + + return measure + + +def point_cloud_to_bbox(points): + """Extract the axis aligned box from a pcl or batch of pcls + Args: + points: Nx3 points or BxNx3 + output is 6 dim: xyz pos of center and 3 lengths + """ + which_dim = ( + len(points.shape) - 2 + ) # first dim if a single cloud and second if batch + mn, mx = points.min(which_dim), points.max(which_dim) + lengths = mx - mn + cntr = 0.5 * (mn + mx) + return np.concatenate([cntr, lengths], axis=which_dim) + + +def write_bbox(scene_bbox, out_filename): + """Export scene bbox to meshes + Args: + scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths + out_filename: (string) filename + + Note: + To visualize the boxes in MeshLab. + 1. Select the objects (the boxes) + 2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh + 3. Select Wireframe view. 
+ """ + + def convert_box_to_trimesh_fmt(box): + ctr = box[:3] + lengths = box[3:] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to ply file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply") + + return + + +def write_oriented_bbox(scene_bbox, out_filename): + """Export oriented (around Z axis) scene bbox to meshes + Args: + scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz) + and heading angle around Z axis. + Y forward, X right, Z upward. heading angle of positive X is 0, + heading angle of positive Y is 90 degrees. + out_filename: (string) filename + """ + + def heading2rotmat(heading_angle): + pass + rotmat = np.zeros((3, 3)) + rotmat[2, 2] = 1 + cosval = np.cos(heading_angle) + sinval = np.sin(heading_angle) + rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]]) + return rotmat + + def convert_oriented_box_to_trimesh_fmt(box): + ctr = box[:3] + lengths = box[3:6] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + trns[0:3, 0:3] = heading2rotmat(box[6]) + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to ply file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply") + + return + + +def write_oriented_bbox_camera_coord(scene_bbox, out_filename): + """Export oriented (around Y axis) scene bbox to meshes + Args: + scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz) + and heading angle around Y axis. + Z forward, X rightward, Y downward. heading angle of positive X is 0, + heading angle of negative Z is 90 degrees. + out_filename: (string) filename + """ + + def heading2rotmat(heading_angle): + pass + rotmat = np.zeros((3, 3)) + rotmat[1, 1] = 1 + cosval = np.cos(heading_angle) + sinval = np.sin(heading_angle) + rotmat[0, :] = np.array([cosval, 0, sinval]) + rotmat[2, :] = np.array([-sinval, 0, cosval]) + return rotmat + + def convert_oriented_box_to_trimesh_fmt(box): + ctr = box[:3] + lengths = box[3:6] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + trns[0:3, 0:3] = heading2rotmat(box[6]) + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to ply file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply") + + return + + +def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64): + """Create lines represented as cylinders connecting pairs of 3D points + Args: + pcl: (N x 2 x 3 numpy array): N pairs of xyz pos + filename: (string) filename for the output mesh (ply) file + rad: radius for the cylinder + res: number of sections used to create the cylinder + """ + scene = trimesh.scene.Scene() + for src, tgt in pcl: + # compute line + vec = tgt - src + M = trimesh.geometry.align_vectors([0, 0, 1], vec, False) + vec = ( + tgt - src + ) # compute again since align_vectors modifies vec in-place! 
+ M[:3, 3] = 0.5 * src + 0.5 * tgt + height = np.sqrt(np.dot(vec, vec)) + scene.add_geometry( + trimesh.creation.cylinder( + radius=rad, height=height, sections=res, transform=M + ) + ) + mesh_list = trimesh.util.concatenate(scene.dump()) + trimesh.io.export.export_mesh( + mesh_list, "%s.ply" % (filename), file_type="ply" + ) + + +# ---------------------------------------- +# Testing +# ---------------------------------------- +if __name__ == "__main__": + print("running some tests") + + ############ + ## Test "write_lines_as_cylinders" + ############ + pcl = np.random.rand(32, 2, 3) + write_lines_as_cylinders(pcl, "point_connectors") + input() + + scene_bbox = np.zeros((1, 7)) + scene_bbox[0, 3:6] = np.array([1, 2, 3]) # dx,dy,dz + scene_bbox[0, 6] = np.pi / 4 # 45 degrees + write_oriented_bbox(scene_bbox, "single_obb_45degree.ply") + ############ + ## Test point_cloud_to_bbox + ############ + pcl = np.random.rand(32, 16, 3) + pcl_bbox = point_cloud_to_bbox(pcl) + assert pcl_bbox.shape == (32, 6) + + pcl = np.random.rand(16, 3) + pcl_bbox = point_cloud_to_bbox(pcl) + assert pcl_bbox.shape == (6,) + + ############ + ## Test corner distance + ############ + crnr1 = np.array( + [ + [2.59038660e00, 8.96107932e-01, 4.73305349e00], + [4.12281644e-01, 8.96107932e-01, 4.48046631e00], + [2.97129656e-01, 8.96107932e-01, 5.47344275e00], + [2.47523462e00, 8.96107932e-01, 5.72602993e00], + [2.59038660e00, 4.41155793e-03, 4.73305349e00], + [4.12281644e-01, 4.41155793e-03, 4.48046631e00], + [2.97129656e-01, 4.41155793e-03, 5.47344275e00], + [2.47523462e00, 4.41155793e-03, 5.72602993e00], + ] + ) + crnr2 = crnr1 + + print(bbox_corner_dist_measure(crnr1, crnr2)) + + print("tests PASSED") diff --git a/models/Mask3D/mask3d/utils/votenet_utils/tf_logger.py b/models/Mask3D/mask3d/utils/votenet_utils/tf_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..2d12e719d1fb189ae60b2c06ab17a143304d73d3 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/tf_logger.py @@ -0,0 +1,81 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
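+
+# Note: this logger targets the TensorFlow 1.x summary API (tf.summary.FileWriter,
+# tf.Summary) and scipy.misc.toimage, which are no longer available in TensorFlow 2.x
+# and recent SciPy releases; running it there would require tf.compat.v1 shims or a
+# rewrite against torch.utils.tensorboard.SummaryWriter.
+#
+# Minimal usage sketch (hypothetical log directory):
+#   logger = Logger("logs/train")
+#   logger.scalar_summary("loss", 0.42, step=1)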
+ +import tensorflow as tf +import numpy as np +import scipy.misc + +try: + from StringIO import StringIO # Python 2.7 +except ImportError: + from io import BytesIO # Python 3.x + + +class Logger(object): + def __init__(self, log_dir): + """Create a summary writer logging to log_dir.""" + self.writer = tf.summary.FileWriter(log_dir) + + def scalar_summary(self, tag, value, step): + """Log a scalar variable.""" + summary = tf.Summary( + value=[tf.Summary.Value(tag=tag, simple_value=value)] + ) + self.writer.add_summary(summary, step) + + def image_summary(self, tag, images, step): + """Log a list of images.""" + + img_summaries = [] + for i, img in enumerate(images): + # Write the image to a string + try: + s = StringIO() + except: + s = BytesIO() + scipy.misc.toimage(img).save(s, format="png") + + # Create an Image object + img_sum = tf.Summary.Image( + encoded_image_string=s.getvalue(), + height=img.shape[0], + width=img.shape[1], + ) + # Create a Summary value + img_summaries.append( + tf.Summary.Value(tag="%s/%d" % (tag, i), image=img_sum) + ) + + # Create and write Summary + summary = tf.Summary(value=img_summaries) + self.writer.add_summary(summary, step) + + def histo_summary(self, tag, values, step, bins=1000): + """Log a histogram of the tensor of values.""" + + # Create a histogram using numpy + counts, bin_edges = np.histogram(values, bins=bins) + + # Fill the fields of the histogram proto + hist = tf.HistogramProto() + hist.min = float(np.min(values)) + hist.max = float(np.max(values)) + hist.num = int(np.prod(values.shape)) + hist.sum = float(np.sum(values)) + hist.sum_squares = float(np.sum(values**2)) + + # Drop the start of the first bin + bin_edges = bin_edges[1:] + + # Add bin edges and counts + for edge in bin_edges: + hist.bucket_limit.append(edge) + for c in counts: + hist.bucket.append(c) + + # Create and write Summary + summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)]) + self.writer.add_summary(summary, step) + self.writer.flush() diff --git a/models/Mask3D/mask3d/utils/votenet_utils/tf_visualizer.py b/models/Mask3D/mask3d/utils/votenet_utils/tf_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..b0795e6077694f344bb79496fd9133fa79ddee66 --- /dev/null +++ b/models/Mask3D/mask3d/utils/votenet_utils/tf_visualizer.py @@ -0,0 +1,52 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
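+
+# Usage sketch: the `opt` argument only needs a `log_dir` attribute; the values
+# below are illustrative.
+#   from argparse import Namespace
+#   vis = Visualizer(Namespace(log_dir="logs"), name="train")
+#   vis.log_scalars({"loss": 0.42}, step=1)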
+ +"""Code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix""" +import os +import time + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +import sys + +sys.path.append(BASE_DIR) +import tf_logger + + +class Visualizer: + def __init__(self, opt, name="train"): + # self.opt = opt + # self.logger = tf_logger.Logger(os.path.join(opt.logging_dir, opt.name)) + # self.log_name = os.path.join(opt.checkpoint_dir, opt.name, 'loss_log.txt') + self.logger = tf_logger.Logger(os.path.join(opt.log_dir, name)) + self.log_name = os.path.join(opt.log_dir, "tf_visualizer_log.txt") + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write( + "================ Training Loss (%s) ================\n" % now + ) + + # |visuals|: dictionary of images to save + def log_images(self, visuals, step): + for label, image_numpy in visuals.items(): + self.logger.image_summary(label, [image_numpy], step) + + # scalars: dictionary of scalar labels and values + def log_scalars(self, scalars, step): + for label, val in scalars.items(): + self.logger.scalar_summary(label, val, step) + + # scatter plots + def plot_current_points(self, points, disp_offset=10): + pass + + # scalars: same format as |scalars| of plot_current_scalars + def print_current_scalars(self, epoch, i, scalars): + message = "(epoch: %d, iters: %d) " % (epoch, i) + for k, v in scalars.items(): + message += "%s: %.3f " % (k, v) + + print(message) + with open(self.log_name, "a") as log_file: + log_file.write("%s\n" % message) diff --git a/models/Mask3D/setup.py b/models/Mask3D/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..06b2008c6805d451762930df34e3eddf32fe3bcc --- /dev/null +++ b/models/Mask3D/setup.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import os.path as osp +import platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + +setup( + name="mask3d", + version="0.1", # Consider using semantic versioning + packages=find_packages(), + package_data={"": ["*.yaml"]}, + install_requires=[ + # List your dependencies here, e.g., + # 'numpy', + # 'pandas', + ], + include_package_data=True, + # zip_safe=False, +) diff --git a/models/Mask3D/test.py b/models/Mask3D/test.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/__init__.py b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/_ext.cpython-310-x86_64-linux-gnu.so b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/_ext.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4d12ddb8ced11fc154ea87487dc795f7d50808a7 Binary files /dev/null and b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/_ext.cpython-310-x86_64-linux-gnu.so differ diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_modules.py b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..2e82cdc249bd2a6cd8e87940a2103ce4438908d8 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_modules.py @@ -0,0 +1,581 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Pointnet2 layers. +Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch +Extended with the following: +1. Uniform sampling in each local region (sample_uniformly) +2. Return sampled points indices to support votenet. 
+""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +import pointnet2_utils +import pytorch_utils as pt_utils +from typing import List + + +class _PointnetSAModuleBase(nn.Module): + def __init__(self): + super().__init__() + self.npoint = None + self.groupers = None + self.mlps = None + + def forward( + self, xyz: torch.Tensor, features: torch.Tensor = None + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, N, C) tensor of the descriptors of the the features + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + new_xyz = ( + pointnet2_utils.gather_operation( + xyz_flipped, + pointnet2_utils.furthest_point_sample(xyz, self.npoint), + ) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1) + + +class PointnetSAModuleMSG(_PointnetSAModuleBase): + r"""Pointnet set abstrction layer with multiscale grouping + + Parameters + ---------- + npoint : int + Number of features + radii : list of float32 + list of radii to group with + nsamples : list of int32 + Number of samples in each ball query + mlps : list of list of int32 + Spec of the pointnet before the global max_pool for each scale + bn : bool + Use batchnorm + """ + + def __init__( + self, + *, + npoint: int, + radii: List[float], + nsamples: List[int], + mlps: List[List[int]], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + if npoint is not None + else pointnet2_utils.GroupAll(use_xyz) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + +class PointnetSAModule(PointnetSAModuleMSG): + r"""Pointnet set abstrction layer + + Parameters + ---------- + npoint : int + Number of features + radius : float + Radius of ball + nsample : int + Number of samples in the ball query + mlp : list + Spec of the pointnet before the global max_pool + bn : bool + Use batchnorm + """ + + def __init__( + self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True + ): + super().__init__( + mlps=[mlp], + npoint=npoint, + radii=[radius], + nsamples=[nsample], + bn=bn, + use_xyz=use_xyz, + ) + + +class 
PointnetSAModuleVotes(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + with extra support for returning point indices for getting their GT votes""" + + def __init__( + self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True, + pooling: str = "max", + sigma: float = None, # for RBF pooling + normalize_xyz: bool = False, # noramlize local XYZ with radius + sample_uniformly: bool = False, + ret_unique_cnt: bool = False + ): + super().__init__() + self.npoint = npoint + self.radius = radius + self.nsample = nsample + self.pooling = pooling + self.mlp_module = None + self.use_xyz = use_xyz + self.sigma = sigma + if self.sigma is None: + self.sigma = self.radius / 2 + self.normalize_xyz = normalize_xyz + self.ret_unique_cnt = ret_unique_cnt + + if npoint is not None: + self.grouper = pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + ret_grouped_xyz=True, + normalize_xyz=normalize_xyz, + sample_uniformly=sample_uniformly, + ret_unique_cnt=ret_unique_cnt, + ) + else: + self.grouper = pointnet2_utils.GroupAll( + use_xyz, ret_grouped_xyz=True + ) + + mlp_spec = mlp + if use_xyz and len(mlp_spec) > 0: + mlp_spec[0] += 3 + self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn) + + def forward( + self, + xyz: torch.Tensor, + features: torch.Tensor = None, + inds: torch.Tensor = None, + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, C, N) tensor of the descriptors of the the features + inds : torch.Tensor + (B, npoint) tensor that stores index to the xyz points (values in 0-N-1) + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors + inds: torch.Tensor + (B, npoint) tensor of the inds + """ + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if inds is None: + inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint) + else: + assert inds.shape[1] == self.npoint + new_xyz = ( + pointnet2_utils.gather_operation(xyz_flipped, inds) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + if not self.ret_unique_cnt: + grouped_features, grouped_xyz = self.grouper( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + else: + grouped_features, grouped_xyz, unique_cnt = self.grouper( + xyz, new_xyz, features + ) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint) + + new_features = self.mlp_module( + grouped_features + ) # (B, mlp[-1], npoint, nsample) + if self.pooling == "max": + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + elif self.pooling == "avg": + new_features = F.avg_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + elif self.pooling == "rbf": + # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma) + # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel + rbf = torch.exp( + -1 + * grouped_xyz.pow(2).sum(1, keepdim=False) + / (self.sigma**2) + / 2 + ) # (B, npoint, nsample) + new_features = torch.sum( + new_features * rbf.unsqueeze(1), -1, keepdim=True + ) / float( + self.nsample + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + if 
not self.ret_unique_cnt: + return new_xyz, new_features, inds + else: + return new_xyz, new_features, inds, unique_cnt + + +class PointnetSAModuleMSGVotes(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + with extra support for returning point indices for getting their GT votes""" + + def __init__( + self, + *, + mlps: List[List[int]], + npoint: int, + radii: List[float], + nsamples: List[int], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(mlps) == len(nsamples) == len(radii) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + if npoint is not None + else pointnet2_utils.GroupAll(use_xyz) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + def forward( + self, + xyz: torch.Tensor, + features: torch.Tensor = None, + inds: torch.Tensor = None, + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, C, C) tensor of the descriptors of the the features + inds : torch.Tensor + (B, npoint) tensor that stores index to the xyz points (values in 0-N-1) + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors + inds: torch.Tensor + (B, npoint) tensor of the inds + """ + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if inds is None: + inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint) + new_xyz = ( + pointnet2_utils.gather_operation(xyz_flipped, inds) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1), inds + + +class PointnetFPModule(nn.Module): + r"""Propigates the features of one set to another + + Parameters + ---------- + mlp : list + Pointnet module parameters + bn : bool + Use batchnorm + """ + + def __init__(self, *, mlp: List[int], bn: bool = True): + super().__init__() + self.mlp = pt_utils.SharedMLP(mlp, bn=bn) + + def forward( + self, + unknown: torch.Tensor, + known: torch.Tensor, + unknow_feats: torch.Tensor, + known_feats: torch.Tensor, + ) -> torch.Tensor: + r""" + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of the xyz positions of the unknown features + known : torch.Tensor + (B, m, 3) tensor of the xyz positions of the known features + unknow_feats : torch.Tensor + (B, C1, n) tensor of the features to be propigated to + known_feats : torch.Tensor + (B, C2, m) tensor of features to be propigated + + Returns + ------- + new_features : torch.Tensor + (B, mlp[-1], n) tensor of the features of the unknown features + """ + + if known is 
not None: + dist, idx = pointnet2_utils.three_nn(unknown, known) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=2, keepdim=True) + weight = dist_recip / norm + + interpolated_feats = pointnet2_utils.three_interpolate( + known_feats, idx, weight + ) + else: + interpolated_feats = known_feats.expand( + *known_feats.size()[0:2], unknown.size(1) + ) + + if unknow_feats is not None: + new_features = torch.cat( + [interpolated_feats, unknow_feats], dim=1 + ) # (B, C2 + C1, n) + else: + new_features = interpolated_feats + + new_features = new_features.unsqueeze(-1) + new_features = self.mlp(new_features) + + return new_features.squeeze(-1) + + +class PointnetLFPModuleMSG(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + learnable feature propagation layer.""" + + def __init__( + self, + *, + mlps: List[List[int]], + radii: List[float], + nsamples: List[int], + post_mlp: List[int], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(mlps) == len(nsamples) == len(radii) + + self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn) + + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + def forward( + self, + xyz2: torch.Tensor, + xyz1: torch.Tensor, + features2: torch.Tensor, + features1: torch.Tensor, + ) -> torch.Tensor: + r"""Propagate features from xyz1 to xyz2. + Parameters + ---------- + xyz2 : torch.Tensor + (B, N2, 3) tensor of the xyz coordinates of the features + xyz1 : torch.Tensor + (B, N1, 3) tensor of the xyz coordinates of the features + features2 : torch.Tensor + (B, C2, N2) tensor of the descriptors of the the features + features1 : torch.Tensor + (B, C1, N1) tensor of the descriptors of the the features + + Returns + ------- + new_features1 : torch.Tensor + (B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors + """ + new_features_list = [] + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz1, xyz2, features1 + ) # (B, C1, N2, nsample) + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], N2, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], N2, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], N2) + + if features2 is not None: + new_features = torch.cat( + [new_features, features2], dim=1 + ) # (B, mlp[-1] + C2, N2) + + new_features = new_features.unsqueeze(-1) + new_features = self.post_mlp(new_features) + + new_features_list.append(new_features) + + return torch.cat(new_features_list, dim=1).squeeze(-1) + + +if __name__ == "__main__": + from torch.autograd import Variable + + torch.manual_seed(1) + torch.cuda.manual_seed_all(1) + xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True) + xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True) + + test_module = PointnetSAModuleMSG( + npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]] + ) + test_module.cuda() + print(test_module(xyz, xyz_feats)) + + for _ in range(1): + _, new_features = test_module(xyz, xyz_feats) + new_features.backward( + torch.cuda.FloatTensor(*new_features.size()).fill_(1) + ) + print(new_features) 
+ print(xyz.grad) diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_test.py b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0994ff56ccbb1e9c97316ba7125b164c34f0d66b --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_test.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Testing customized ops. """ + +import torch +from torch.autograd import gradcheck +import numpy as np + +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +import pointnet2_utils + + +def test_interpolation_grad(): + batch_size = 1 + feat_dim = 2 + m = 4 + feats = ( + torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda() + ) + + def interpolate_func(inputs): + idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda() + weight = ( + torch.from_numpy(np.array([[[1, 1, 1], [2, 2, 2]]])).float().cuda() + ) + interpolated_feats = pointnet2_utils.three_interpolate( + inputs, idx, weight + ) + return interpolated_feats + + assert gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1) + + +if __name__ == "__main__": + test_interpolation_grad() diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_utils.py b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24be3136b1c465b10599393b12a92bcb4ee96e8f --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pointnet2_utils.py @@ -0,0 +1,438 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
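+
+# The ops defined below wrap the compiled `_ext` CUDA kernels, so they require the
+# built extension and CUDA tensors. Illustrative sketch of the sampling ops:
+#   xyz = torch.rand(2, 1024, 3).cuda()                                 # (B, N, 3)
+#   idx = furthest_point_sample(xyz, 256)                               # (B, 256)
+#   centers = gather_operation(xyz.transpose(1, 2).contiguous(), idx)   # (B, 3, 256)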
+ +""" Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch """ +from __future__ import ( + division, + absolute_import, + with_statement, + print_function, + unicode_literals, +) +import torch +from torch.autograd import Function +import torch.nn as nn +import pointnet2.pytorch_utils as pt_utils +import sys + +try: + import builtins +except: + import __builtin__ as builtins + +try: + import pointnet2._ext as _ext +except ImportError: + if not getattr(builtins, "__POINTNET2_SETUP__", False): + raise ImportError( + "Could not import _ext module.\n" + "Please see the setup instructions in the README: " + "https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst" + ) + +if False: + # Workaround for type hints without depending on the `typing` module + from typing import * + + +class RandomDropout(nn.Module): + def __init__(self, p=0.5, inplace=False): + super(RandomDropout, self).__init__() + self.p = p + self.inplace = inplace + + def forward(self, X): + theta = torch.Tensor(1).uniform_(0, self.p)[0] + return pt_utils.feature_dropout_no_scaling( + X, theta, self.train, self.inplace + ) + + +class FurthestPointSampling(Function): + @staticmethod + def forward(ctx, xyz, npoint): + # type: (Any, torch.Tensor, int) -> torch.Tensor + r""" + Uses iterative furthest point sampling to select a set of npoint features that have the largest + minimum distance + + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor where N > npoint + npoint : int32 + number of features in the sampled set + + Returns + ------- + torch.Tensor + (B, npoint) tensor containing the set + """ + fps_inds = _ext.furthest_point_sampling(xyz, npoint) + ctx.mark_non_differentiable(fps_inds) + return fps_inds + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + + +class GatherOperation(Function): + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor + + idx : torch.Tensor + (B, npoint) tensor of the features to gather + + Returns + ------- + torch.Tensor + (B, C, npoint) tensor + """ + + _, C, N = features.size() + + ctx.for_backwards = (idx, C, N) + + return _ext.gather_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + + grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) + return grad_features, None + + +gather_operation = GatherOperation.apply + + +class ThreeNN(Function): + @staticmethod + def forward(ctx, unknown, known): + # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + Find the three nearest neighbors of unknown in known + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of known features + known : torch.Tensor + (B, m, 3) tensor of unknown features + + Returns + ------- + dist : torch.Tensor + (B, n, 3) l2 distance to the three nearest neighbors + idx : torch.Tensor + (B, n, 3) index of 3 nearest neighbors + """ + dist2, idx = _ext.three_nn(unknown, known) + + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + @staticmethod + def forward(ctx, features, idx, weight): + # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor + r""" + Performs weight linear interpolation on 3 features + Parameters + ---------- + 
features : torch.Tensor + (B, c, m) Features descriptors to be interpolated from + idx : torch.Tensor + (B, n, 3) three nearest neighbors of the target features in features + weight : torch.Tensor + (B, n, 3) weights + + Returns + ------- + torch.Tensor + (B, c, n) tensor of the interpolated features + """ + B, c, m = features.size() + n = idx.size(1) + + ctx.three_interpolate_for_backward = (idx, weight, m) + + return _ext.three_interpolate(features, idx, weight) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + r""" + Parameters + ---------- + grad_out : torch.Tensor + (B, c, n) tensor with gradients of ouputs + + Returns + ------- + grad_features : torch.Tensor + (B, c, m) tensor with gradients of features + + None + + None + """ + idx, weight, m = ctx.three_interpolate_for_backward + + grad_features = _ext.three_interpolate_grad( + grad_out.contiguous(), idx, weight, m + ) + + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class GroupingOperation(Function): + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor of features to group + idx : torch.Tensor + (B, npoint, nsample) tensor containing the indicies of features to group with + + Returns + ------- + torch.Tensor + (B, C, npoint, nsample) tensor + """ + B, nfeatures, nsample = idx.size() + _, C, N = features.size() + + ctx.for_backwards = (idx, N) + + return _ext.group_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + + Parameters + ---------- + grad_out : torch.Tensor + (B, C, npoint, nsample) tensor of the gradients of the output from forward + + Returns + ------- + torch.Tensor + (B, C, N) gradient of the features + None + """ + idx, N = ctx.for_backwards + + grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N) + + return grad_features, None + + +grouping_operation = GroupingOperation.apply + + +class BallQuery(Function): + @staticmethod + def forward(ctx, radius, nsample, xyz, new_xyz): + # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + radius : float + radius of the balls + nsample : int + maximum number of features in the balls + xyz : torch.Tensor + (B, N, 3) xyz coordinates of the features + new_xyz : torch.Tensor + (B, npoint, 3) centers of the ball query + + Returns + ------- + torch.Tensor + (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + inds = _ext.ball_query(new_xyz, xyz, radius, nsample) + ctx.mark_non_differentiable(inds) + return inds + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + r""" + Groups with a ball query of radius + + Parameters + --------- + radius : float32 + Radius of ball + nsample : int32 + Maximum number of features to gather in the ball + """ + + def __init__( + self, + radius, + nsample, + use_xyz=True, + ret_grouped_xyz=False, + normalize_xyz=False, + sample_uniformly=False, + ret_unique_cnt=False, + ): + super(QueryAndGroup, self).__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + self.ret_grouped_xyz = ret_grouped_xyz + self.normalize_xyz = normalize_xyz + self.sample_uniformly = 
sample_uniformly + self.ret_unique_cnt = ret_unique_cnt + if self.ret_unique_cnt: + assert self.sample_uniformly + + def forward(self, xyz, new_xyz, features=None): + # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + centriods (B, npoint, 3) + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, 3 + C, npoint, nsample) tensor + """ + idx = ball_query(self.radius, self.nsample, xyz, new_xyz) + + if self.sample_uniformly: + unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) + for i_batch in range(idx.shape[0]): + for i_region in range(idx.shape[1]): + unique_ind = torch.unique(idx[i_batch, i_region, :]) + num_unique = unique_ind.shape[0] + unique_cnt[i_batch, i_region] = num_unique + sample_ind = torch.randint( + 0, + num_unique, + (self.nsample - num_unique,), + dtype=torch.long, + ) + all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) + idx[i_batch, i_region, :] = all_ind + + xyz_trans = xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation( + xyz_trans, idx + ) # (B, 3, npoint, nsample) + grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) + if self.normalize_xyz: + grouped_xyz /= self.radius + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + new_features = torch.cat( + [grouped_xyz, grouped_features], dim=1 + ) # (B, C + 3, npoint, nsample) + else: + new_features = grouped_features + else: + assert ( + self.use_xyz + ), "Cannot have not features and not use xyz as a feature!" + new_features = grouped_xyz + + ret = [new_features] + if self.ret_grouped_xyz: + ret.append(grouped_xyz) + if self.ret_unique_cnt: + ret.append(unique_cnt) + if len(ret) == 1: + return ret[0] + else: + return tuple(ret) + + +class GroupAll(nn.Module): + r""" + Groups all features + + Parameters + --------- + """ + + def __init__(self, use_xyz=True, ret_grouped_xyz=False): + # type: (GroupAll, bool) -> None + super(GroupAll, self).__init__() + self.use_xyz = use_xyz + + def forward(self, xyz, new_xyz, features=None): + # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + Ignored + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, C + 3, 1, N) tensor + """ + + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + if features is not None: + grouped_features = features.unsqueeze(2) + if self.use_xyz: + new_features = torch.cat( + [grouped_xyz, grouped_features], dim=1 + ) # (B, 3 + C, 1, N) + else: + new_features = grouped_features + else: + new_features = grouped_xyz + + if self.ret_grouped_xyz: + return new_features, grouped_xyz + else: + return new_features diff --git a/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pytorch_utils.py b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..59ece654dcda0cc3c0bb25c84f63bd06563dcfcd --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/build/lib.linux-x86_64-cpython-310/pointnet2/pytorch_utils.py @@ -0,0 +1,283 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
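+
+# SharedMLP below stacks 1x1 Conv2d layers (optionally with BatchNorm and ReLU) that
+# are applied pointwise over a (B, C, npoint, nsample) tensor. Illustrative sketch:
+#   mlp = SharedMLP([6, 64, 128], bn=True)
+#   out = mlp(torch.rand(2, 6, 512, 32))   # -> (2, 128, 512, 32)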
+ +""" Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch """ +import torch +import torch.nn as nn +from typing import List, Tuple + + +class SharedMLP(nn.Sequential): + def __init__( + self, + args: List[int], + *, + bn: bool = False, + activation=nn.ReLU(inplace=True), + preact: bool = False, + first: bool = False, + name: str = "" + ): + super().__init__() + + for i in range(len(args) - 1): + self.add_module( + name + "layer{}".format(i), + Conv2d( + args[i], + args[i + 1], + bn=(not first or not preact or (i != 0)) and bn, + activation=activation + if (not first or not preact or (i != 0)) + else None, + preact=preact, + ), + ) + + +class _BNBase(nn.Sequential): + def __init__(self, in_size, batch_norm=None, name=""): + super().__init__() + self.add_module(name + "bn", batch_norm(in_size)) + + nn.init.constant_(self[0].weight, 1.0) + nn.init.constant_(self[0].bias, 0) + + +class BatchNorm1d(_BNBase): + def __init__(self, in_size: int, *, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name) + + +class BatchNorm2d(_BNBase): + def __init__(self, in_size: int, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) + + +class BatchNorm3d(_BNBase): + def __init__(self, in_size: int, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name) + + +class _ConvBase(nn.Sequential): + def __init__( + self, + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=None, + batch_norm=None, + bias=True, + preact=False, + name="", + ): + super().__init__() + + bias = bias and (not bn) + conv_unit = conv( + in_size, + out_size, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias, + ) + init(conv_unit.weight) + if bias: + nn.init.constant_(conv_unit.bias, 0) + + if bn: + if not preact: + bn_unit = batch_norm(out_size) + else: + bn_unit = batch_norm(in_size) + + if preact: + if bn: + self.add_module(name + "bn", bn_unit) + + if activation is not None: + self.add_module(name + "activation", activation) + + self.add_module(name + "conv", conv_unit) + + if not preact: + if bn: + self.add_module(name + "bn", bn_unit) + + if activation is not None: + self.add_module(name + "activation", activation) + + +class Conv1d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv1d, + batch_norm=BatchNorm1d, + bias=bias, + preact=preact, + name=name, + ) + + +class Conv2d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int] = (1, 1), + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv2d, + batch_norm=BatchNorm2d, + bias=bias, + preact=preact, + name=name, + ) + + +class Conv3d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int, int] = (1, 1, 1), + stride: Tuple[int, int, int] = (1, 1, 1), + padding: Tuple[int, 
int, int] = (0, 0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv3d, + batch_norm=BatchNorm3d, + bias=bias, + preact=preact, + name=name, + ) + + +class FC(nn.Sequential): + def __init__( + self, + in_size: int, + out_size: int, + *, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=None, + preact: bool = False, + name: str = "" + ): + super().__init__() + + fc = nn.Linear(in_size, out_size, bias=not bn) + if init is not None: + init(fc.weight) + if not bn: + nn.init.constant_(fc.bias, 0) + + if preact: + if bn: + self.add_module(name + "bn", BatchNorm1d(in_size)) + + if activation is not None: + self.add_module(name + "activation", activation) + + self.add_module(name + "fc", fc) + + if not preact: + if bn: + self.add_module(name + "bn", BatchNorm1d(out_size)) + + if activation is not None: + self.add_module(name + "activation", activation) + + +def set_bn_momentum_default(bn_momentum): + def fn(m): + if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.momentum = bn_momentum + + return fn + + +class BNMomentumScheduler(object): + def __init__( + self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default + ): + if not isinstance(model, nn.Module): + raise RuntimeError( + "Class '{}' is not a PyTorch nn Module".format( + type(model).__name__ + ) + ) + + self.model = model + self.setter = setter + self.lmbd = bn_lambda + + self.step(last_epoch + 1) + self.last_epoch = last_epoch + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + + self.last_epoch = epoch + self.model.apply(self.setter(self.lmbd(epoch))) diff --git a/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_deps b/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_deps new file mode 100644 index 0000000000000000000000000000000000000000..5b475e17b7fcd7d14f3250049b8f720b99f9c651 Binary files /dev/null and b/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_deps differ diff --git a/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_log b/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..3ed9825e1ed5af8b235173a266522663c23194b8 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/.ninja_log @@ -0,0 +1,10 @@ +# ninja log v5 +3 16339 1716815443290452011 /home/jean/Amine/OpenYolo3D/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/pointnet2/_ext_src/src/interpolate_gpu.o 1110d6105579f2ec +2 16424 1716815443382452213 /home/jean/Amine/OpenYolo3D/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/pointnet2/_ext_src/src/group_points_gpu.o d52b03808d6848ab +1 17576 1716815444530454744 /home/jean/Amine/OpenYolo3D/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/pointnet2/_ext_src/src/ball_query_gpu.o 4f1d3dbcad794901 +4 17716 1716815444666455043 /home/jean/Amine/OpenYolo3D/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/pointnet2/_ext_src/src/sampling_gpu.o 13eb66e506d5b99f +2 18032 1716815444986455748 
b/models/Mask3D/third_party/pointnet2/build/temp.linux-x86_64-cpython-310/pointnet2/_ext_src/src/sampling_gpu.o differ diff --git a/models/Mask3D/third_party/pointnet2/dist/pointnet2-0.0.0-py3.10-linux-x86_64.egg b/models/Mask3D/third_party/pointnet2/dist/pointnet2-0.0.0-py3.10-linux-x86_64.egg new file mode 100644 index 0000000000000000000000000000000000000000..ef040e5e6dddece40687ac9a0232871391ddf082 Binary files /dev/null and b/models/Mask3D/third_party/pointnet2/dist/pointnet2-0.0.0-py3.10-linux-x86_64.egg differ diff --git a/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/PKG-INFO b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..c44293899fb386f21d7a5c12d93d5cfbfb733e65 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/PKG-INFO @@ -0,0 +1,3 @@ +Metadata-Version: 2.1 +Name: pointnet2 +Version: 0.0.0 diff --git a/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/SOURCES.txt b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..da8c33f523afb3cf3c8c95eab3ac929831e01c75 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/SOURCES.txt @@ -0,0 +1,19 @@ +setup.py +pointnet2/__init__.py +pointnet2/pointnet2_modules.py +pointnet2/pointnet2_test.py +pointnet2/pointnet2_utils.py +pointnet2/pytorch_utils.py +pointnet2.egg-info/PKG-INFO +pointnet2.egg-info/SOURCES.txt +pointnet2.egg-info/dependency_links.txt +pointnet2.egg-info/top_level.txt +pointnet2/_ext_src/src/ball_query.cpp +pointnet2/_ext_src/src/ball_query_gpu.cu +pointnet2/_ext_src/src/bindings.cpp +pointnet2/_ext_src/src/group_points.cpp +pointnet2/_ext_src/src/group_points_gpu.cu +pointnet2/_ext_src/src/interpolate.cpp +pointnet2/_ext_src/src/interpolate_gpu.cu +pointnet2/_ext_src/src/sampling.cpp +pointnet2/_ext_src/src/sampling_gpu.cu \ No newline at end of file diff --git a/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/dependency_links.txt b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/top_level.txt b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..047cf37410a3a0fb88a8ba9fce0fa74cefa80d8f --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2.egg-info/top_level.txt @@ -0,0 +1 @@ +pointnet2 diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/__init__.py b/models/Mask3D/third_party/pointnet2/pointnet2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/ball_query.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/ball_query.h new file mode 100644 index 0000000000000000000000000000000000000000..b4feff83f2a307f13e13b4794ff942850c99127e --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/ball_query.h @@ -0,0 +1,7 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
+ +#pragma once +#include + +at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, + const int nsample); diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/cuda_utils.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/cuda_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..f746526af880edc8de3563785db784fe205354bd --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/cuda_utils.h @@ -0,0 +1,43 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#ifndef _CUDA_UTILS_H +#define _CUDA_UTILS_H + +#include +#include +#include + +#include +#include + +#include + +#define TOTAL_THREADS 512 + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + + return max(min(1 << pow_2, TOTAL_THREADS), 1); +} + +inline dim3 opt_block_config(int x, int y) { + const int x_threads = opt_n_threads(x); + const int y_threads = + max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); + dim3 block_config(x_threads, y_threads, 1); + + return block_config; +} + +#define CUDA_CHECK_ERRORS() \ + do { \ + cudaError_t err = cudaGetLastError(); \ + if (cudaSuccess != err) { \ + fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \ + cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \ + __FILE__); \ + exit(-1); \ + } \ + } while (0) + +#endif diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/group_points.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/group_points.h new file mode 100644 index 0000000000000000000000000000000000000000..97be802326d57cb9311bfde309bf39b215a1513e --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/group_points.h @@ -0,0 +1,8 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + + +#pragma once +#include + +at::Tensor group_points(at::Tensor points, at::Tensor idx); +at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/interpolate.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/interpolate.h new file mode 100644 index 0000000000000000000000000000000000000000..e7fb7923425a29aaaa7bef941463e4854ba2b991 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/interpolate.h @@ -0,0 +1,12 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#pragma once + +#include +#include + +std::vector three_nn(at::Tensor unknowns, at::Tensor knows); +at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, + at::Tensor weight); +at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, + at::Tensor weight, const int m); diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/sampling.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/sampling.h new file mode 100644 index 0000000000000000000000000000000000000000..7de473e60ef260756547997986ec7f026a4a27f2 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/sampling.h @@ -0,0 +1,9 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
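The `opt_n_threads` helper in `cuda_utils.h` above chooses the CUDA block size as the largest power of two that does not exceed the amount of work, clamped to `TOTAL_THREADS = 512`. A rough Python rendering of that heuristic, for illustration only (it assumes `work_size >= 1`):

```
import math

def opt_n_threads(work_size, total_threads=512):
    # Largest power of two not exceeding work_size, clamped to [1, total_threads],
    # mirroring opt_n_threads / TOTAL_THREADS in cuda_utils.h.
    pow_2 = int(math.log(float(work_size)) / math.log(2.0))
    return max(min(1 << pow_2, total_threads), 1)

print(opt_n_threads(300))   # 256
print(opt_n_threads(3000))  # 512, capped at total_threads
```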
+ + +#pragma once +#include + +at::Tensor gather_points(at::Tensor points, at::Tensor idx); +at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); +at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples); diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/utils.h b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..815dabb20f6e1bead7e004551abfa48598802d35 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/include/utils.h @@ -0,0 +1,28 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + + +#pragma once +#include +#include + +#define CHECK_CUDA(x) \ + do { \ + AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \ + } while (0) + +#define CHECK_CONTIGUOUS(x) \ + do { \ + AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \ + } while (0) + +#define CHECK_IS_INT(x) \ + do { \ + AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \ + #x " must be an int tensor"); \ + } while (0) + +#define CHECK_IS_FLOAT(x) \ + do { \ + AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \ + #x " must be a float tensor"); \ + } while (0) diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query.cpp b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7dd77d5f32332eb6db1535df6bf954cec17e6502 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query.cpp @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + + +#include "ball_query.h" +#include "utils.h" + +void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, + int nsample, const float *new_xyz, + const float *xyz, int *idx); + +at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, + const int nsample) { + CHECK_CONTIGUOUS(new_xyz); + CHECK_CONTIGUOUS(xyz); + CHECK_IS_FLOAT(new_xyz); + CHECK_IS_FLOAT(xyz); + + if (new_xyz.is_cuda()) { + CHECK_CUDA(xyz); + } + + at::Tensor idx = + torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample}, + at::device(new_xyz.device()).dtype(at::ScalarType::Int)); + + if (new_xyz.is_cuda()) { + query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1), + radius, nsample, new_xyz.data(), + xyz.data(), idx.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return idx; +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query_gpu.cu b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..cee88cb354999d7cb5a61e0d40216b8692c44265 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/ball_query_gpu.cu @@ -0,0 +1,57 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
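The `CHECK_*` macros in `utils.h` above spell out the extension's calling convention: point tensors must be contiguous float32 CUDA tensors and index tensors int32, otherwise the wrappers abort. A minimal sketch of calling the compiled op directly, assuming the extension has been built and is importable as `pointnet2._ext` (the module name used by `pointnet2_utils.py` further below):

```
import torch
import pointnet2._ext as _ext  # compiled with TORCH_EXTENSION_NAME=_ext

# Contiguous float32 CUDA tensors, as required by the CHECK_* macros.
xyz = torch.rand(2, 1024, 3, device="cuda").contiguous()     # all points      (B, N, 3)
new_xyz = torch.rand(2, 128, 3, device="cuda").contiguous()  # query centroids (B, npoint, 3)

idx = _ext.ball_query(new_xyz, xyz, 0.2, 32)  # radius=0.2, nsample=32
print(idx.shape, idx.dtype)                   # torch.Size([2, 128, 32]) torch.int32
```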
+ + +#include +#include +#include + +#include "cuda_utils.h" + +// input: new_xyz(b, m, 3) xyz(b, n, 3) +// output: idx(b, m, nsample) +__global__ void query_ball_point_kernel(int b, int n, int m, float radius, + int nsample, + const float *__restrict__ new_xyz, + const float *__restrict__ xyz, + int *__restrict__ idx) { + int batch_index = blockIdx.x; + xyz += batch_index * n * 3; + new_xyz += batch_index * m * 3; + idx += m * nsample * batch_index; + + int index = threadIdx.x; + int stride = blockDim.x; + + float radius2 = radius * radius; + for (int j = index; j < m; j += stride) { + float new_x = new_xyz[j * 3 + 0]; + float new_y = new_xyz[j * 3 + 1]; + float new_z = new_xyz[j * 3 + 2]; + for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + + (new_z - z) * (new_z - z); + if (d2 < radius2) { + if (cnt == 0) { + for (int l = 0; l < nsample; ++l) { + idx[j * nsample + l] = k; + } + } + idx[j * nsample + cnt] = k; + ++cnt; + } + } + } +} + +void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, + int nsample, const float *new_xyz, + const float *xyz, int *idx) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + query_ball_point_kernel<<>>( + b, n, m, radius, nsample, new_xyz, xyz, idx); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/bindings.cpp b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/bindings.cpp new file mode 100644 index 0000000000000000000000000000000000000000..58d6c2d25a388ca49016dc8bedf7ac8fabe8fe0b --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/bindings.cpp @@ -0,0 +1,22 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + + +#include "ball_query.h" +#include "group_points.h" +#include "interpolate.h" +#include "sampling.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("gather_points", &gather_points); + m.def("gather_points_grad", &gather_points_grad); + m.def("furthest_point_sampling", &furthest_point_sampling); + + m.def("three_nn", &three_nn); + m.def("three_interpolate", &three_interpolate); + m.def("three_interpolate_grad", &three_interpolate_grad); + + m.def("ball_query", &ball_query); + + m.def("group_points", &group_points); + m.def("group_points_grad", &group_points_grad); +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points.cpp b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points.cpp new file mode 100644 index 0000000000000000000000000000000000000000..22998dd7f40e678de6dd7ed32c8b411ad3a438e8 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points.cpp @@ -0,0 +1,63 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
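One detail of `query_ball_point_kernel` above is easy to miss: as soon as the first in-radius neighbour of a query point is found, all `nsample` slots are pre-filled with its index, so neighbourhoods with fewer than `nsample` points come back padded with the first hit rather than left at zero. A small pure-PyTorch reference of that behaviour (illustrative only, not the code path the extension runs):

```
import torch

def ball_query_reference(new_xyz, xyz, radius, nsample):
    # new_xyz: (B, M, 3) query centroids, xyz: (B, N, 3) points
    B, M, _ = new_xyz.shape
    idx = torch.zeros(B, M, nsample, dtype=torch.int64)
    d2 = torch.cdist(new_xyz, xyz) ** 2              # squared distances, (B, M, N)
    for b in range(B):
        for j in range(M):
            hits = torch.nonzero(d2[b, j] < radius ** 2).flatten()
            if hits.numel() == 0:
                continue                              # no neighbour: row stays all zeros
            take = hits[:nsample]
            idx[b, j, :] = take[0]                    # pad every slot with the first hit
            idx[b, j, :take.numel()] = take
    return idx
```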
+ + +#include "group_points.h" +#include "utils.h" + +void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, + float *out); + +void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + int nsample, const float *grad_out, + const int *idx, float *grad_points); + +at::Tensor group_points(at::Tensor points, at::Tensor idx) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)}, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), + idx.size(1), idx.size(2), points.data(), + idx.data(), output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} + +at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), n}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + group_points_grad_kernel_wrapper( + grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2), + grad_out.data(), idx.data(), output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points_gpu.cu b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e36672e7476843f557035ab7e709e112b1b829da --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/group_points_gpu.cu @@ -0,0 +1,78 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
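`group_points` above is a batched gather: features of shape (B, C, N) indexed by neighbourhoods of shape (B, npoint, nsample) give a (B, C, npoint, nsample) output, with out[b, c, j, k] = points[b, c, idx[b, j, k]]. A short pure-PyTorch sketch of the same gather, for illustration:

```
import torch

def group_points_reference(points, idx):
    # points: (B, C, N) features, idx: (B, npoint, nsample) neighbour indices
    B, C, N = points.shape
    _, npoint, nsample = idx.shape
    flat = idx.long().reshape(B, 1, npoint * nsample).expand(-1, C, -1)
    return torch.gather(points, 2, flat).reshape(B, C, npoint, nsample)

points = torch.rand(2, 16, 1024)
idx = torch.randint(0, 1024, (2, 128, 32))
print(group_points_reference(points, idx).shape)  # torch.Size([2, 16, 128, 32])
```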
+ + +#include +#include + +#include "cuda_utils.h" + +// input: points(b, c, n) idx(b, npoints, nsample) +// output: out(b, c, npoints, nsample) +__global__ void group_points_kernel(int b, int c, int n, int npoints, + int nsample, + const float *__restrict__ points, + const int *__restrict__ idx, + float *__restrict__ out) { + int batch_index = blockIdx.x; + points += batch_index * n * c; + idx += batch_index * npoints * nsample; + out += batch_index * npoints * nsample * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * npoints; i += stride) { + const int l = i / npoints; + const int j = i % npoints; + for (int k = 0; k < nsample; ++k) { + int ii = idx[j * nsample + k]; + out[(l * npoints + j) * nsample + k] = points[l * n + ii]; + } + } +} + +void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, + float *out) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + group_points_kernel<<>>( + b, c, n, npoints, nsample, points, idx, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) +// output: grad_points(b, c, n) +__global__ void group_points_grad_kernel(int b, int c, int n, int npoints, + int nsample, + const float *__restrict__ grad_out, + const int *__restrict__ idx, + float *__restrict__ grad_points) { + int batch_index = blockIdx.x; + grad_out += batch_index * npoints * nsample * c; + idx += batch_index * npoints * nsample; + grad_points += batch_index * n * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * npoints; i += stride) { + const int l = i / npoints; + const int j = i % npoints; + for (int k = 0; k < nsample; ++k) { + int ii = idx[j * nsample + k]; + atomicAdd(grad_points + l * n + ii, + grad_out[(l * npoints + j) * nsample + k]); + } + } +} + +void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + int nsample, const float *grad_out, + const int *idx, float *grad_points) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + group_points_grad_kernel<<>>( + b, c, n, npoints, nsample, grad_out, idx, grad_points); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate.cpp b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b680c5202a1aea10dbceaf21e010c6a83c54932 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate.cpp @@ -0,0 +1,101 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
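The backward kernel above scatter-adds gradients to the source points with `atomicAdd`, because the same point index can appear in many neighbourhoods and several threads may therefore write to the same location. A pure-PyTorch sketch of that reduction (illustrative only):

```
import torch

def group_points_grad_reference(grad_out, idx, n):
    # grad_out: (B, C, npoint, nsample), idx: (B, npoint, nsample) -> grad_points: (B, C, n)
    B, C, npoint, nsample = grad_out.shape
    flat_idx = idx.long().reshape(B, 1, npoint * nsample).expand(-1, C, -1)
    grad_points = torch.zeros(B, C, n, dtype=grad_out.dtype)
    return grad_points.scatter_add_(2, flat_idx, grad_out.reshape(B, C, npoint * nsample))
```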
+ +#include "interpolate.h" +#include "utils.h" + +void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx); +void three_interpolate_kernel_wrapper(int b, int c, int m, int n, + const float *points, const int *idx, + const float *weight, float *out); +void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, + const float *grad_out, + const int *idx, const float *weight, + float *grad_points); + +std::vector three_nn(at::Tensor unknowns, at::Tensor knows) { + CHECK_CONTIGUOUS(unknowns); + CHECK_CONTIGUOUS(knows); + CHECK_IS_FLOAT(unknowns); + CHECK_IS_FLOAT(knows); + + if (unknowns.is_cuda()) { + CHECK_CUDA(knows); + } + + at::Tensor idx = + torch::zeros({unknowns.size(0), unknowns.size(1), 3}, + at::device(unknowns.device()).dtype(at::ScalarType::Int)); + at::Tensor dist2 = + torch::zeros({unknowns.size(0), unknowns.size(1), 3}, + at::device(unknowns.device()).dtype(at::ScalarType::Float)); + + if (unknowns.is_cuda()) { + three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1), + unknowns.data(), knows.data(), + dist2.data(), idx.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return {dist2, idx}; +} + +at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, + at::Tensor weight) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_CONTIGUOUS(weight); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + CHECK_IS_FLOAT(weight); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + CHECK_CUDA(weight); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1)}, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + three_interpolate_kernel_wrapper( + points.size(0), points.size(1), points.size(2), idx.size(1), + points.data(), idx.data(), weight.data(), + output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} +at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, + at::Tensor weight, const int m) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_CONTIGUOUS(weight); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + CHECK_IS_FLOAT(weight); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + CHECK_CUDA(weight); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), m}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + three_interpolate_grad_kernel_wrapper( + grad_out.size(0), grad_out.size(1), grad_out.size(2), m, + grad_out.data(), idx.data(), weight.data(), + output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate_gpu.cu b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..b4c56440f316881695a9030a6bd508e43c7fc2b8 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/interpolate_gpu.cu @@ -0,0 +1,157 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
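`three_nn` and `three_interpolate` above implement the inverse-distance-weighted interpolation that the feature-propagation module uses further below: each query point blends the features of its three nearest known points, with weights proportional to 1/d and normalised to sum to one. A pure-PyTorch sketch of the two ops combined (illustrative only; it assumes at least three known points):

```
import torch

def three_interpolate_reference(unknown, known, known_feats):
    # unknown: (B, n, 3), known: (B, m, 3), known_feats: (B, C, m) -> (B, C, n)
    dist = torch.cdist(unknown, known)                   # (B, n, m)
    dist3, idx = dist.topk(3, dim=-1, largest=False)     # three nearest neighbours
    weight = 1.0 / (dist3 + 1e-8)
    weight = weight / weight.sum(dim=-1, keepdim=True)   # (B, n, 3)
    B, C, m = known_feats.shape
    n = unknown.shape[1]
    gathered = torch.gather(
        known_feats.unsqueeze(2).expand(B, C, n, m), 3,
        idx.unsqueeze(1).expand(B, C, n, 3),
    )                                                    # (B, C, n, 3)
    return (gathered * weight.unsqueeze(1)).sum(dim=-1)
```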
+ + +#include +#include +#include + +#include "cuda_utils.h" + +// input: unknown(b, n, 3) known(b, m, 3) +// output: dist2(b, n, 3), idx(b, n, 3) +__global__ void three_nn_kernel(int b, int n, int m, + const float *__restrict__ unknown, + const float *__restrict__ known, + float *__restrict__ dist2, + int *__restrict__ idx) { + int batch_index = blockIdx.x; + unknown += batch_index * n * 3; + known += batch_index * m * 3; + dist2 += batch_index * n * 3; + idx += batch_index * n * 3; + + int index = threadIdx.x; + int stride = blockDim.x; + for (int j = index; j < n; j += stride) { + float ux = unknown[j * 3 + 0]; + float uy = unknown[j * 3 + 1]; + float uz = unknown[j * 3 + 2]; + + double best1 = 1e40, best2 = 1e40, best3 = 1e40; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < m; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; + besti3 = besti2; + best2 = best1; + besti2 = besti1; + best1 = d; + besti1 = k; + } else if (d < best2) { + best3 = best2; + besti3 = besti2; + best2 = d; + besti2 = k; + } else if (d < best3) { + best3 = d; + besti3 = k; + } + } + dist2[j * 3 + 0] = best1; + dist2[j * 3 + 1] = best2; + dist2[j * 3 + 2] = best3; + + idx[j * 3 + 0] = besti1; + idx[j * 3 + 1] = besti2; + idx[j * 3 + 2] = besti3; + } +} + +void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_nn_kernel<<>>(b, n, m, unknown, known, + dist2, idx); + + CUDA_CHECK_ERRORS(); +} + +// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3) +// output: out(b, c, n) +__global__ void three_interpolate_kernel(int b, int c, int m, int n, + const float *__restrict__ points, + const int *__restrict__ idx, + const float *__restrict__ weight, + float *__restrict__ out) { + int batch_index = blockIdx.x; + points += batch_index * m * c; + + idx += batch_index * n * 3; + weight += batch_index * n * 3; + + out += batch_index * n * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * n; i += stride) { + const int l = i / n; + const int j = i % n; + float w1 = weight[j * 3 + 0]; + float w2 = weight[j * 3 + 1]; + float w3 = weight[j * 3 + 2]; + + int i1 = idx[j * 3 + 0]; + int i2 = idx[j * 3 + 1]; + int i3 = idx[j * 3 + 2]; + + out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + + points[l * m + i3] * w3; + } +} + +void three_interpolate_kernel_wrapper(int b, int c, int m, int n, + const float *points, const int *idx, + const float *weight, float *out) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_interpolate_kernel<<>>( + b, c, m, n, points, idx, weight, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) +// output: grad_points(b, c, m) + +__global__ void three_interpolate_grad_kernel( + int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, const float *__restrict__ weight, + float *__restrict__ grad_points) { + int batch_index = blockIdx.x; + grad_out += batch_index * n * c; + idx += batch_index * n * 3; + weight += batch_index * n * 3; + grad_points += batch_index * m * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * n; i += 
stride) { + const int l = i / n; + const int j = i % n; + float w1 = weight[j * 3 + 0]; + float w2 = weight[j * 3 + 1]; + float w3 = weight[j * 3 + 2]; + + int i1 = idx[j * 3 + 0]; + int i2 = idx[j * 3 + 1]; + int i3 = idx[j * 3 + 2]; + + atomicAdd(grad_points + l * m + i1, grad_out[i] * w1); + atomicAdd(grad_points + l * m + i2, grad_out[i] * w2); + atomicAdd(grad_points + l * m + i3, grad_out[i] * w3); + } +} + +void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, + const float *grad_out, + const int *idx, const float *weight, + float *grad_points) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_interpolate_grad_kernel<<>>( + b, c, n, m, grad_out, idx, weight, grad_points); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling.cpp b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de55822a958c545f36e35264067f41c28f660286 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling.cpp @@ -0,0 +1,88 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#include "sampling.h" +#include "utils.h" + +void gather_points_kernel_wrapper(int b, int c, int n, int npoints, + const float *points, const int *idx, + float *out); +void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, + float *grad_points); + +void furthest_point_sampling_kernel_wrapper(int b, int n, int m, + const float *dataset, float *temp, + int *idxs); + +at::Tensor gather_points(at::Tensor points, at::Tensor idx) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1)}, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), + idx.size(1), points.data(), + idx.data(), output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} + +at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, + const int n) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), n}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n, + idx.size(1), grad_out.data(), + idx.data(), output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} +at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) { + CHECK_CONTIGUOUS(points); + CHECK_IS_FLOAT(points); + + at::Tensor output = + torch::zeros({points.size(0), nsamples}, + at::device(points.device()).dtype(at::ScalarType::Int)); + + at::Tensor tmp = + torch::full({points.size(0), points.size(1)}, 1e10, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + furthest_point_sampling_kernel_wrapper( + points.size(0), points.size(1), nsamples, points.data(), + tmp.data(), output.data()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git 
a/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling_gpu.cu b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..d2b3707f3d3950a49f70c3ccdfad3a53107d1ad4 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/_ext_src/src/sampling_gpu.cu @@ -0,0 +1,232 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + + +#include +#include + +#include "cuda_utils.h" + +// input: points(b, c, n) idx(b, m) +// output: out(b, c, m) +__global__ void gather_points_kernel(int b, int c, int n, int m, + const float *__restrict__ points, + const int *__restrict__ idx, + float *__restrict__ out) { + for (int i = blockIdx.x; i < b; i += gridDim.x) { + for (int l = blockIdx.y; l < c; l += gridDim.y) { + for (int j = threadIdx.x; j < m; j += blockDim.x) { + int a = idx[i * m + j]; + out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; + } + } + } +} + +void gather_points_kernel_wrapper(int b, int c, int n, int npoints, + const float *points, const int *idx, + float *out) { + gather_points_kernel<<>>(b, c, n, npoints, + points, idx, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, m) idx(b, m) +// output: grad_points(b, c, n) +__global__ void gather_points_grad_kernel(int b, int c, int n, int m, + const float *__restrict__ grad_out, + const int *__restrict__ idx, + float *__restrict__ grad_points) { + for (int i = blockIdx.x; i < b; i += gridDim.x) { + for (int l = blockIdx.y; l < c; l += gridDim.y) { + for (int j = threadIdx.x; j < m; j += blockDim.x) { + int a = idx[i * m + j]; + atomicAdd(grad_points + (i * c + l) * n + a, + grad_out[(i * c + l) * m + j]); + } + } + } +} + +void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, + float *grad_points) { + gather_points_grad_kernel<<>>( + b, c, n, npoints, grad_out, idx, grad_points); + + CUDA_CHECK_ERRORS(); +} + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, + int idx1, int idx2) { + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? i2 : i1; +} + +// Input dataset: (b, n, 3), tmp: (b, n) +// Ouput idxs (b, m) +template +__global__ void furthest_point_sampling_kernel( + int b, int n, int m, const float *__restrict__ dataset, + float *__restrict__ temp, int *__restrict__ idxs) { + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + if (mag <= 1e-3) continue; + + float d = + (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? 
d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) idxs[j] = old; + } +} + +void furthest_point_sampling_kernel_wrapper(int b, int n, int m, + const float *dataset, float *temp, + int *idxs) { + unsigned int n_threads = opt_n_threads(n); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + switch (n_threads) { + case 512: + furthest_point_sampling_kernel<512> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 256: + furthest_point_sampling_kernel<256> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 128: + furthest_point_sampling_kernel<128> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 64: + furthest_point_sampling_kernel<64> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 32: + furthest_point_sampling_kernel<32> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 16: + furthest_point_sampling_kernel<16> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 8: + furthest_point_sampling_kernel<8> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 4: + furthest_point_sampling_kernel<4> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 2: + furthest_point_sampling_kernel<2> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 1: + furthest_point_sampling_kernel<1> + <<>>(b, n, m, dataset, temp, idxs); + break; + default: + furthest_point_sampling_kernel<512> + <<>>(b, n, m, dataset, temp, idxs); + } + + CUDA_CHECK_ERRORS(); +} diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_modules.py b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..2e82cdc249bd2a6cd8e87940a2103ce4438908d8 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_modules.py @@ -0,0 +1,581 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Pointnet2 layers. +Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch +Extended with the following: +1. Uniform sampling in each local region (sample_uniformly) +2. Return sampled points indices to support votenet. 
+""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +import pointnet2_utils +import pytorch_utils as pt_utils +from typing import List + + +class _PointnetSAModuleBase(nn.Module): + def __init__(self): + super().__init__() + self.npoint = None + self.groupers = None + self.mlps = None + + def forward( + self, xyz: torch.Tensor, features: torch.Tensor = None + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, N, C) tensor of the descriptors of the the features + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + new_xyz = ( + pointnet2_utils.gather_operation( + xyz_flipped, + pointnet2_utils.furthest_point_sample(xyz, self.npoint), + ) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1) + + +class PointnetSAModuleMSG(_PointnetSAModuleBase): + r"""Pointnet set abstrction layer with multiscale grouping + + Parameters + ---------- + npoint : int + Number of features + radii : list of float32 + list of radii to group with + nsamples : list of int32 + Number of samples in each ball query + mlps : list of list of int32 + Spec of the pointnet before the global max_pool for each scale + bn : bool + Use batchnorm + """ + + def __init__( + self, + *, + npoint: int, + radii: List[float], + nsamples: List[int], + mlps: List[List[int]], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + if npoint is not None + else pointnet2_utils.GroupAll(use_xyz) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + +class PointnetSAModule(PointnetSAModuleMSG): + r"""Pointnet set abstrction layer + + Parameters + ---------- + npoint : int + Number of features + radius : float + Radius of ball + nsample : int + Number of samples in the ball query + mlp : list + Spec of the pointnet before the global max_pool + bn : bool + Use batchnorm + """ + + def __init__( + self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True + ): + super().__init__( + mlps=[mlp], + npoint=npoint, + radii=[radius], + nsamples=[nsample], + bn=bn, + use_xyz=use_xyz, + ) + + +class 
PointnetSAModuleVotes(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + with extra support for returning point indices for getting their GT votes""" + + def __init__( + self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True, + pooling: str = "max", + sigma: float = None, # for RBF pooling + normalize_xyz: bool = False, # noramlize local XYZ with radius + sample_uniformly: bool = False, + ret_unique_cnt: bool = False + ): + super().__init__() + self.npoint = npoint + self.radius = radius + self.nsample = nsample + self.pooling = pooling + self.mlp_module = None + self.use_xyz = use_xyz + self.sigma = sigma + if self.sigma is None: + self.sigma = self.radius / 2 + self.normalize_xyz = normalize_xyz + self.ret_unique_cnt = ret_unique_cnt + + if npoint is not None: + self.grouper = pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + ret_grouped_xyz=True, + normalize_xyz=normalize_xyz, + sample_uniformly=sample_uniformly, + ret_unique_cnt=ret_unique_cnt, + ) + else: + self.grouper = pointnet2_utils.GroupAll( + use_xyz, ret_grouped_xyz=True + ) + + mlp_spec = mlp + if use_xyz and len(mlp_spec) > 0: + mlp_spec[0] += 3 + self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn) + + def forward( + self, + xyz: torch.Tensor, + features: torch.Tensor = None, + inds: torch.Tensor = None, + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, C, N) tensor of the descriptors of the the features + inds : torch.Tensor + (B, npoint) tensor that stores index to the xyz points (values in 0-N-1) + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors + inds: torch.Tensor + (B, npoint) tensor of the inds + """ + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if inds is None: + inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint) + else: + assert inds.shape[1] == self.npoint + new_xyz = ( + pointnet2_utils.gather_operation(xyz_flipped, inds) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + if not self.ret_unique_cnt: + grouped_features, grouped_xyz = self.grouper( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + else: + grouped_features, grouped_xyz, unique_cnt = self.grouper( + xyz, new_xyz, features + ) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint) + + new_features = self.mlp_module( + grouped_features + ) # (B, mlp[-1], npoint, nsample) + if self.pooling == "max": + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + elif self.pooling == "avg": + new_features = F.avg_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + elif self.pooling == "rbf": + # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma) + # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel + rbf = torch.exp( + -1 + * grouped_xyz.pow(2).sum(1, keepdim=False) + / (self.sigma**2) + / 2 + ) # (B, npoint, nsample) + new_features = torch.sum( + new_features * rbf.unsqueeze(1), -1, keepdim=True + ) / float( + self.nsample + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + if 
not self.ret_unique_cnt: + return new_xyz, new_features, inds + else: + return new_xyz, new_features, inds, unique_cnt + + +class PointnetSAModuleMSGVotes(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + with extra support for returning point indices for getting their GT votes""" + + def __init__( + self, + *, + mlps: List[List[int]], + npoint: int, + radii: List[float], + nsamples: List[int], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(mlps) == len(nsamples) == len(radii) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + if npoint is not None + else pointnet2_utils.GroupAll(use_xyz) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + def forward( + self, + xyz: torch.Tensor, + features: torch.Tensor = None, + inds: torch.Tensor = None, + ) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, C, C) tensor of the descriptors of the the features + inds : torch.Tensor + (B, npoint) tensor that stores index to the xyz points (values in 0-N-1) + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors + inds: torch.Tensor + (B, npoint) tensor of the inds + """ + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if inds is None: + inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint) + new_xyz = ( + pointnet2_utils.gather_operation(xyz_flipped, inds) + .transpose(1, 2) + .contiguous() + if self.npoint is not None + else None + ) + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features + ) # (B, C, npoint, nsample) + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1), inds + + +class PointnetFPModule(nn.Module): + r"""Propigates the features of one set to another + + Parameters + ---------- + mlp : list + Pointnet module parameters + bn : bool + Use batchnorm + """ + + def __init__(self, *, mlp: List[int], bn: bool = True): + super().__init__() + self.mlp = pt_utils.SharedMLP(mlp, bn=bn) + + def forward( + self, + unknown: torch.Tensor, + known: torch.Tensor, + unknow_feats: torch.Tensor, + known_feats: torch.Tensor, + ) -> torch.Tensor: + r""" + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of the xyz positions of the unknown features + known : torch.Tensor + (B, m, 3) tensor of the xyz positions of the known features + unknow_feats : torch.Tensor + (B, C1, n) tensor of the features to be propigated to + known_feats : torch.Tensor + (B, C2, m) tensor of features to be propigated + + Returns + ------- + new_features : torch.Tensor + (B, mlp[-1], n) tensor of the features of the unknown features + """ + + if known is 
not None: + dist, idx = pointnet2_utils.three_nn(unknown, known) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=2, keepdim=True) + weight = dist_recip / norm + + interpolated_feats = pointnet2_utils.three_interpolate( + known_feats, idx, weight + ) + else: + interpolated_feats = known_feats.expand( + *known_feats.size()[0:2], unknown.size(1) + ) + + if unknow_feats is not None: + new_features = torch.cat( + [interpolated_feats, unknow_feats], dim=1 + ) # (B, C2 + C1, n) + else: + new_features = interpolated_feats + + new_features = new_features.unsqueeze(-1) + new_features = self.mlp(new_features) + + return new_features.squeeze(-1) + + +class PointnetLFPModuleMSG(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG + learnable feature propagation layer.""" + + def __init__( + self, + *, + mlps: List[List[int]], + radii: List[float], + nsamples: List[int], + post_mlp: List[int], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False + ): + super().__init__() + + assert len(mlps) == len(nsamples) == len(radii) + + self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn) + + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly, + ) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + def forward( + self, + xyz2: torch.Tensor, + xyz1: torch.Tensor, + features2: torch.Tensor, + features1: torch.Tensor, + ) -> torch.Tensor: + r"""Propagate features from xyz1 to xyz2. + Parameters + ---------- + xyz2 : torch.Tensor + (B, N2, 3) tensor of the xyz coordinates of the features + xyz1 : torch.Tensor + (B, N1, 3) tensor of the xyz coordinates of the features + features2 : torch.Tensor + (B, C2, N2) tensor of the descriptors of the the features + features1 : torch.Tensor + (B, C1, N1) tensor of the descriptors of the the features + + Returns + ------- + new_features1 : torch.Tensor + (B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors + """ + new_features_list = [] + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz1, xyz2, features1 + ) # (B, C1, N2, nsample) + new_features = self.mlps[i]( + new_features + ) # (B, mlp[-1], N2, nsample) + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], N2, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], N2) + + if features2 is not None: + new_features = torch.cat( + [new_features, features2], dim=1 + ) # (B, mlp[-1] + C2, N2) + + new_features = new_features.unsqueeze(-1) + new_features = self.post_mlp(new_features) + + new_features_list.append(new_features) + + return torch.cat(new_features_list, dim=1).squeeze(-1) + + +if __name__ == "__main__": + from torch.autograd import Variable + + torch.manual_seed(1) + torch.cuda.manual_seed_all(1) + xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True) + xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True) + + test_module = PointnetSAModuleMSG( + npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]] + ) + test_module.cuda() + print(test_module(xyz, xyz_feats)) + + for _ in range(1): + _, new_features = test_module(xyz, xyz_feats) + new_features.backward( + torch.cuda.FloatTensor(*new_features.size()).fill_(1) + ) + print(new_features) 
+ print(xyz.grad) diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_test.py b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0994ff56ccbb1e9c97316ba7125b164c34f0d66b --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_test.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Testing customized ops. """ + +import torch +from torch.autograd import gradcheck +import numpy as np + +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +import pointnet2_utils + + +def test_interpolation_grad(): + batch_size = 1 + feat_dim = 2 + m = 4 + feats = ( + torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda() + ) + + def interpolate_func(inputs): + idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda() + weight = ( + torch.from_numpy(np.array([[[1, 1, 1], [2, 2, 2]]])).float().cuda() + ) + interpolated_feats = pointnet2_utils.three_interpolate( + inputs, idx, weight + ) + return interpolated_feats + + assert gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1) + + +if __name__ == "__main__": + test_interpolation_grad() diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_utils.py b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24be3136b1c465b10599393b12a92bcb4ee96e8f --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/pointnet2_utils.py @@ -0,0 +1,438 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch """ +from __future__ import ( + division, + absolute_import, + with_statement, + print_function, + unicode_literals, +) +import torch +from torch.autograd import Function +import torch.nn as nn +import pointnet2.pytorch_utils as pt_utils +import sys + +try: + import builtins +except: + import __builtin__ as builtins + +try: + import pointnet2._ext as _ext +except ImportError: + if not getattr(builtins, "__POINTNET2_SETUP__", False): + raise ImportError( + "Could not import _ext module.\n" + "Please see the setup instructions in the README: " + "https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst" + ) + +if False: + # Workaround for type hints without depending on the `typing` module + from typing import * + + +class RandomDropout(nn.Module): + def __init__(self, p=0.5, inplace=False): + super(RandomDropout, self).__init__() + self.p = p + self.inplace = inplace + + def forward(self, X): + theta = torch.Tensor(1).uniform_(0, self.p)[0] + return pt_utils.feature_dropout_no_scaling( + X, theta, self.train, self.inplace + ) + + +class FurthestPointSampling(Function): + @staticmethod + def forward(ctx, xyz, npoint): + # type: (Any, torch.Tensor, int) -> torch.Tensor + r""" + Uses iterative furthest point sampling to select a set of npoint features that have the largest + minimum distance + + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor where N > npoint + npoint : int32 + number of features in the sampled set + + Returns + ------- + torch.Tensor + (B, npoint) tensor containing the set + """ + fps_inds = _ext.furthest_point_sampling(xyz, npoint) + ctx.mark_non_differentiable(fps_inds) + return fps_inds + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + + +class 
GatherOperation(Function): + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor + + idx : torch.Tensor + (B, npoint) tensor of the features to gather + + Returns + ------- + torch.Tensor + (B, C, npoint) tensor + """ + + _, C, N = features.size() + + ctx.for_backwards = (idx, C, N) + + return _ext.gather_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + + grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) + return grad_features, None + + +gather_operation = GatherOperation.apply + + +class ThreeNN(Function): + @staticmethod + def forward(ctx, unknown, known): + # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + Find the three nearest neighbors of unknown in known + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of known features + known : torch.Tensor + (B, m, 3) tensor of unknown features + + Returns + ------- + dist : torch.Tensor + (B, n, 3) l2 distance to the three nearest neighbors + idx : torch.Tensor + (B, n, 3) index of 3 nearest neighbors + """ + dist2, idx = _ext.three_nn(unknown, known) + + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + @staticmethod + def forward(ctx, features, idx, weight): + # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor + r""" + Performs weight linear interpolation on 3 features + Parameters + ---------- + features : torch.Tensor + (B, c, m) Features descriptors to be interpolated from + idx : torch.Tensor + (B, n, 3) three nearest neighbors of the target features in features + weight : torch.Tensor + (B, n, 3) weights + + Returns + ------- + torch.Tensor + (B, c, n) tensor of the interpolated features + """ + B, c, m = features.size() + n = idx.size(1) + + ctx.three_interpolate_for_backward = (idx, weight, m) + + return _ext.three_interpolate(features, idx, weight) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + r""" + Parameters + ---------- + grad_out : torch.Tensor + (B, c, n) tensor with gradients of ouputs + + Returns + ------- + grad_features : torch.Tensor + (B, c, m) tensor with gradients of features + + None + + None + """ + idx, weight, m = ctx.three_interpolate_for_backward + + grad_features = _ext.three_interpolate_grad( + grad_out.contiguous(), idx, weight, m + ) + + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class GroupingOperation(Function): + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor of features to group + idx : torch.Tensor + (B, npoint, nsample) tensor containing the indicies of features to group with + + Returns + ------- + torch.Tensor + (B, C, npoint, nsample) tensor + """ + B, nfeatures, nsample = idx.size() + _, C, N = features.size() + + ctx.for_backwards = (idx, N) + + return _ext.group_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + + Parameters + ---------- + grad_out : torch.Tensor + (B, C, npoint, nsample) tensor of the gradients of the output from 
forward + + Returns + ------- + torch.Tensor + (B, C, N) gradient of the features + None + """ + idx, N = ctx.for_backwards + + grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N) + + return grad_features, None + + +grouping_operation = GroupingOperation.apply + + +class BallQuery(Function): + @staticmethod + def forward(ctx, radius, nsample, xyz, new_xyz): + # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + radius : float + radius of the balls + nsample : int + maximum number of features in the balls + xyz : torch.Tensor + (B, N, 3) xyz coordinates of the features + new_xyz : torch.Tensor + (B, npoint, 3) centers of the ball query + + Returns + ------- + torch.Tensor + (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + inds = _ext.ball_query(new_xyz, xyz, radius, nsample) + ctx.mark_non_differentiable(inds) + return inds + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + r""" + Groups with a ball query of radius + + Parameters + --------- + radius : float32 + Radius of ball + nsample : int32 + Maximum number of features to gather in the ball + """ + + def __init__( + self, + radius, + nsample, + use_xyz=True, + ret_grouped_xyz=False, + normalize_xyz=False, + sample_uniformly=False, + ret_unique_cnt=False, + ): + super(QueryAndGroup, self).__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + self.ret_grouped_xyz = ret_grouped_xyz + self.normalize_xyz = normalize_xyz + self.sample_uniformly = sample_uniformly + self.ret_unique_cnt = ret_unique_cnt + if self.ret_unique_cnt: + assert self.sample_uniformly + + def forward(self, xyz, new_xyz, features=None): + # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + centriods (B, npoint, 3) + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, 3 + C, npoint, nsample) tensor + """ + idx = ball_query(self.radius, self.nsample, xyz, new_xyz) + + if self.sample_uniformly: + unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) + for i_batch in range(idx.shape[0]): + for i_region in range(idx.shape[1]): + unique_ind = torch.unique(idx[i_batch, i_region, :]) + num_unique = unique_ind.shape[0] + unique_cnt[i_batch, i_region] = num_unique + sample_ind = torch.randint( + 0, + num_unique, + (self.nsample - num_unique,), + dtype=torch.long, + ) + all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) + idx[i_batch, i_region, :] = all_ind + + xyz_trans = xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation( + xyz_trans, idx + ) # (B, 3, npoint, nsample) + grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) + if self.normalize_xyz: + grouped_xyz /= self.radius + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + new_features = torch.cat( + [grouped_xyz, grouped_features], dim=1 + ) # (B, C + 3, npoint, nsample) + else: + new_features = grouped_features + else: + assert ( + self.use_xyz + ), "Cannot have not features and not use xyz as a feature!" 
+ new_features = grouped_xyz + + ret = [new_features] + if self.ret_grouped_xyz: + ret.append(grouped_xyz) + if self.ret_unique_cnt: + ret.append(unique_cnt) + if len(ret) == 1: + return ret[0] + else: + return tuple(ret) + + +class GroupAll(nn.Module): + r""" + Groups all features + + Parameters + --------- + """ + + def __init__(self, use_xyz=True, ret_grouped_xyz=False): + # type: (GroupAll, bool) -> None + super(GroupAll, self).__init__() + self.use_xyz = use_xyz + + def forward(self, xyz, new_xyz, features=None): + # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + Ignored + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, C + 3, 1, N) tensor + """ + + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + if features is not None: + grouped_features = features.unsqueeze(2) + if self.use_xyz: + new_features = torch.cat( + [grouped_xyz, grouped_features], dim=1 + ) # (B, 3 + C, 1, N) + else: + new_features = grouped_features + else: + new_features = grouped_xyz + + if self.ret_grouped_xyz: + return new_features, grouped_xyz + else: + return new_features diff --git a/models/Mask3D/third_party/pointnet2/pointnet2/pytorch_utils.py b/models/Mask3D/third_party/pointnet2/pointnet2/pytorch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..59ece654dcda0cc3c0bb25c84f63bd06563dcfcd --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/pointnet2/pytorch_utils.py @@ -0,0 +1,283 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch """ +import torch +import torch.nn as nn +from typing import List, Tuple + + +class SharedMLP(nn.Sequential): + def __init__( + self, + args: List[int], + *, + bn: bool = False, + activation=nn.ReLU(inplace=True), + preact: bool = False, + first: bool = False, + name: str = "" + ): + super().__init__() + + for i in range(len(args) - 1): + self.add_module( + name + "layer{}".format(i), + Conv2d( + args[i], + args[i + 1], + bn=(not first or not preact or (i != 0)) and bn, + activation=activation + if (not first or not preact or (i != 0)) + else None, + preact=preact, + ), + ) + + +class _BNBase(nn.Sequential): + def __init__(self, in_size, batch_norm=None, name=""): + super().__init__() + self.add_module(name + "bn", batch_norm(in_size)) + + nn.init.constant_(self[0].weight, 1.0) + nn.init.constant_(self[0].bias, 0) + + +class BatchNorm1d(_BNBase): + def __init__(self, in_size: int, *, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name) + + +class BatchNorm2d(_BNBase): + def __init__(self, in_size: int, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) + + +class BatchNorm3d(_BNBase): + def __init__(self, in_size: int, name: str = ""): + super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name) + + +class _ConvBase(nn.Sequential): + def __init__( + self, + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=None, + batch_norm=None, + bias=True, + preact=False, + name="", + ): + super().__init__() + + bias = bias and (not bn) + conv_unit = conv( + in_size, + out_size, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias, + ) + init(conv_unit.weight) + if bias: + nn.init.constant_(conv_unit.bias, 0) + + if bn: + if 
not preact: + bn_unit = batch_norm(out_size) + else: + bn_unit = batch_norm(in_size) + + if preact: + if bn: + self.add_module(name + "bn", bn_unit) + + if activation is not None: + self.add_module(name + "activation", activation) + + self.add_module(name + "conv", conv_unit) + + if not preact: + if bn: + self.add_module(name + "bn", bn_unit) + + if activation is not None: + self.add_module(name + "activation", activation) + + +class Conv1d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv1d, + batch_norm=BatchNorm1d, + bias=bias, + preact=preact, + name=name, + ) + + +class Conv2d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int] = (1, 1), + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv2d, + batch_norm=BatchNorm2d, + bias=bias, + preact=preact, + name=name, + ) + + +class Conv3d(_ConvBase): + def __init__( + self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int, int] = (1, 1, 1), + stride: Tuple[int, int, int] = (1, 1, 1), + padding: Tuple[int, int, int] = (0, 0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = "" + ): + super().__init__( + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv3d, + batch_norm=BatchNorm3d, + bias=bias, + preact=preact, + name=name, + ) + + +class FC(nn.Sequential): + def __init__( + self, + in_size: int, + out_size: int, + *, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=None, + preact: bool = False, + name: str = "" + ): + super().__init__() + + fc = nn.Linear(in_size, out_size, bias=not bn) + if init is not None: + init(fc.weight) + if not bn: + nn.init.constant_(fc.bias, 0) + + if preact: + if bn: + self.add_module(name + "bn", BatchNorm1d(in_size)) + + if activation is not None: + self.add_module(name + "activation", activation) + + self.add_module(name + "fc", fc) + + if not preact: + if bn: + self.add_module(name + "bn", BatchNorm1d(out_size)) + + if activation is not None: + self.add_module(name + "activation", activation) + + +def set_bn_momentum_default(bn_momentum): + def fn(m): + if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.momentum = bn_momentum + + return fn + + +class BNMomentumScheduler(object): + def __init__( + self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default + ): + if not isinstance(model, nn.Module): + raise RuntimeError( + "Class '{}' is not a PyTorch nn Module".format( + type(model).__name__ + ) + ) + + self.model = model + self.setter = setter + self.lmbd = bn_lambda + + self.step(last_epoch + 1) + self.last_epoch = last_epoch + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + + self.last_epoch = epoch + self.model.apply(self.setter(self.lmbd(epoch))) diff --git 
a/models/Mask3D/third_party/pointnet2/setup.py b/models/Mask3D/third_party/pointnet2/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..2c0a7e1a9723a71548a6e0a0dcc349fa698ee142 --- /dev/null +++ b/models/Mask3D/third_party/pointnet2/setup.py @@ -0,0 +1,40 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from setuptools import find_packages, setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension +import glob +import os.path as osp + +this_dir = osp.dirname(osp.abspath(__file__)) + +_ext_src_root = "pointnet2/_ext_src" +_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob( + "{}/src/*.cu".format(_ext_src_root) +) +_ext_headers = glob.glob("{}/include/*".format(_ext_src_root)) + +setup( + name="pointnet2", + ext_modules=[ + CUDAExtension( + name="pointnet2._ext", + sources=_ext_sources, + extra_compile_args={ + "cxx": [ + "-O2", + "-I{}".format("{}/include".format(_ext_src_root)), + ], + "nvcc": [ + "-O2", + "-I{}".format("{}/include".format(_ext_src_root)), + ], + }, + include_dirs=[osp.join(this_dir, _ext_src_root, "include")], + ) + ], + cmdclass={"build_ext": BuildExtension}, + packages=find_packages(), +) diff --git a/models/YOLO-World/.dockerignore b/models/YOLO-World/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..1aefdd171f6113a7e143e357e7fc0a804d8121ae --- /dev/null +++ b/models/YOLO-World/.dockerignore @@ -0,0 +1,2 @@ +docs +Dockerfile \ No newline at end of file diff --git a/models/YOLO-World/.gitattributes b/models/YOLO-World/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a86bbf76e750c2c73f109d8eecd23298d0a5a6dd --- /dev/null +++ b/models/YOLO-World/.gitattributes @@ -0,0 +1,34 @@ +# Basic .gitattributes for a python repo. + +# Source files +# ============ +*.pxd text diff=python +*.py text diff=python +*.py3 text diff=python +*.pyw text diff=python +*.pyx text diff=python +*.pyz text diff=python +*.pyi text diff=python + +# Binary files +# ============ +*.db binary +*.p binary +*.pkl binary +*.pickle binary +*.pyc binary export-ignore +*.pyo binary export-ignore +*.pyd binary + +# Jupyter notebook +*.ipynb text eol=lf + +# Others +* text=auto +*.txt text +*.sh text eol=lf + +# Note: .db, .p, and .pkl files are associated +# with the python modules ``pickle``, ``dbm.*``, +# ``shelve``, ``marshal``, ``anydbm``, & ``bsddb`` +# (among others). 
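
**Usage sketch: pointnet2 CUDA ops**

The `setup.py` above builds the `pointnet2._ext` CUDA extension that backs the operations defined in `pointnet2_utils.py` (furthest point sampling, ball query and grouping, three-NN interpolation). The snippet below is a minimal, hypothetical sketch of how these ops fit together; it is not part of the repository. It assumes the extension has been installed with `pip install .` from `models/Mask3D/third_party/pointnet2`, that the package is importable as `pointnet2.pointnet2_utils`, and that a CUDA device is available. Tensor shapes follow the docstrings in the diff.

```
import torch
from pointnet2 import pointnet2_utils  # assumed import path after installing the extension

B, N, C, npoint, nsample, radius = 2, 1024, 16, 128, 32, 0.2

xyz = torch.rand(B, N, 3).cuda()    # point coordinates (B, N, 3)
feats = torch.rand(B, C, N).cuda()  # per-point descriptors (B, C, N)

# Furthest point sampling: pick npoint well-spread centroid indices, then gather their xyz.
fps_idx = pointnet2_utils.furthest_point_sample(xyz, npoint)            # (B, npoint) int32
new_xyz = pointnet2_utils.gather_operation(
    xyz.transpose(1, 2).contiguous(), fps_idx
).transpose(1, 2).contiguous()                                          # (B, npoint, 3)

# Ball query + grouping: collect nsample neighbours (xyz offsets + features) per centroid.
grouper = pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=True)
grouped = grouper(xyz, new_xyz, feats)                                  # (B, C + 3, npoint, nsample)

# Three-NN interpolation: propagate coarse per-centroid features back to all N points
# using normalized inverse-distance weights, as in the feature-propagation modules above.
coarse_feats = torch.rand(B, C, npoint).cuda()                          # (B, C, npoint)
dist, idx = pointnet2_utils.three_nn(xyz, new_xyz)                      # both (B, N, 3)
weight = 1.0 / (dist + 1e-8)
weight = weight / weight.sum(dim=2, keepdim=True)
dense_feats = pointnet2_utils.three_interpolate(coarse_feats, idx, weight)  # (B, C, N)

print(fps_idx.shape, grouped.shape, dense_feats.shape)
```

This mirrors the set-abstraction / feature-propagation pattern used by the Mask3D third-party modules: sample centroids, group local neighbourhoods around them, and interpolate coarse features back to the full point set.
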
diff --git a/models/YOLO-World/.gitmodules b/models/YOLO-World/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..4d97e1db083248de331235e176a80e5d5fc4b85b --- /dev/null +++ b/models/YOLO-World/.gitmodules @@ -0,0 +1,3 @@ +[submodule "third_party/mmyolo"] + path = third_party/mmyolo + url = https://github.com/onuralpszr/mmyolo.git diff --git a/models/YOLO-World/Dockerfile b/models/YOLO-World/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4869c792851577c052bee84aa2ead917cef3d96a --- /dev/null +++ b/models/YOLO-World/Dockerfile @@ -0,0 +1,43 @@ +FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 + +ARG MODEL="yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py" +ARG WEIGHT="yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth" + +ENV FORCE_CUDA="1" +ENV MMCV_WITH_OPS=1 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3-pip \ + libgl1-mesa-glx \ + libsm6 \ + libxext6 \ + libxrender-dev \ + libglib2.0-0 \ + git \ + python3-dev \ + python3-wheel + +RUN pip3 install --upgrade pip \ + && pip3 install \ + gradio \ + opencv-python \ + supervision \ + mmengine \ + setuptools \ + openmim \ + && mim install mmcv==2.0.0 \ + && pip3 install --no-cache-dir --index-url https://download.pytorch.org/whl/cu118 \ + wheel \ + torch \ + torchvision \ + torchaudio + +COPY . /yolo +WORKDIR /yolo + +RUN pip3 install -e . + +RUN curl -o weights/$WEIGHT -L https://huggingface.co/wondervictor/YOLO-World/resolve/main/$WEIGHT + +ENTRYPOINT [ "python3", "demo.py" ] +CMD ["configs/pretrain/$MODEL", "weights/$WEIGHT"] \ No newline at end of file diff --git a/models/YOLO-World/LICENSE b/models/YOLO-World/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..dd8b209f0bc925e778312b84191a75023c38eb87 --- /dev/null +++ b/models/YOLO-World/LICENSE @@ -0,0 +1,1347 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. \ No newline at end of file diff --git a/models/YOLO-World/README.md b/models/YOLO-World/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90f85f319e78f24bbdc01e7a3328f95c6bcf37aa --- /dev/null +++ b/models/YOLO-World/README.md @@ -0,0 +1,231 @@ +
+ +
+Tianheng Cheng2,3,*, +Lin Song1,📧,*, +Yixiao Ge1,🌟,2, + Wenyu Liu3, +Xinggang Wang3,📧, +Ying Shan1,2 +
+ +\* Equal contribution 🌟 Project lead 📧 Corresponding author + +1 Tencent AI Lab, 2 ARC Lab, Tencent PCG +3 Huazhong University of Science and Technology +
+
+ +[![arxiv paper](https://img.shields.io/badge/Project-Page-green)](https://wondervictor.github.io/) +[![arxiv paper](https://img.shields.io/badge/arXiv-Paper-red)](https://arxiv.org/abs/2401.17270) +Open In Colab +[![demo](https://img.shields.io/badge/🤗HugginngFace-Spaces-orange)](https://huggingface.co/spaces/stevengrove/YOLO-World) +[![Replicate](https://replicate.com/zsxkib/yolo-world/badge)](https://replicate.com/zsxkib/yolo-world) +[![hfpaper](https://img.shields.io/badge/🤗HugginngFace-Paper-yellow)](https://huggingface.co/papers/2401.17270) +[![license](https://img.shields.io/badge/License-GPLv3.0-blue)](LICENSE) +[![yoloworldseg](https://img.shields.io/badge/YOLOWorldxEfficientSAM-🤗Spaces-orange)](https://huggingface.co/spaces/SkalskiP/YOLO-World) +[![yologuide](https://img.shields.io/badge/📖Notebook-roboflow-purple)](https://supervision.roboflow.com/develop/notebooks/zero-shot-object-detection-with-yolo-world) +[![deploy](https://media.roboflow.com/deploy.svg)](https://inference.roboflow.com/foundation/yolo_world/) + +
+
+
+## Notice
+
+We recommend that everyone **use English to communicate on issues**, as this helps developers from around the world discuss, share experiences, and answer questions together.
+
+## 🔥 Updates
+`[2024-5-18]:` YOLO-World models have been [integrated with the FiftyOne computer vision toolkit](https://docs.voxel51.com/integrations/ultralytics.html#open-vocabulary-detection) for streamlined open-vocabulary inference across image and video datasets.
+`[2024-5-16]:` Hey guys! Long time no see! This update contains (1) a [fine-tuning guide](https://github.com/AILab-CVC/YOLO-World?#highlights--introduction) and (2) [TFLite export](./docs/tflite_deploy.md) with INT8 quantization.
+`[2024-5-9]:` This update contains the real [`reparameterization`](./docs/reparameterize.md) 🪄, which is better suited to fine-tuning on custom datasets and improves training/inference efficiency 🚀!
+`[2024-4-28]:` Long time no see! This update contains bug fixes and improvements: (1) ONNX demo; (2) image demo (supports tensor input); (3) new pre-trained models; (4) image prompts; (5) a simple version for fine-tuning / deployment; (6) an installation guide (including a `requirements.txt`).
+`[2024-3-28]:` We provide: (1) more high-resolution pre-trained models (e.g., S, M, X) ([#142](https://github.com/AILab-CVC/YOLO-World/issues/142)); (2) pre-trained models with CLIP-Large text encoders. Most importantly, we preliminarily fix **fine-tuning without `mask-refine`** and explore a new fine-tuning setting ([#160](https://github.com/AILab-CVC/YOLO-World/issues/160), [#76](https://github.com/AILab-CVC/YOLO-World/issues/76)). In addition, fine-tuning YOLO-World with `mask-refine` also obtains significant improvements; see more details in [configs/finetune_coco](./configs/finetune_coco/).
+`[2024-3-16]:` We fix the demo bugs ([#110](https://github.com/AILab-CVC/YOLO-World/issues/110), [#94](https://github.com/AILab-CVC/YOLO-World/issues/94), [#129](https://github.com/AILab-CVC/YOLO-World/issues/129), [#125](https://github.com/AILab-CVC/YOLO-World/issues/125)) with visualizations of segmentation masks, and release [**YOLO-World with Embeddings**](./docs/prompt_yolo_world.md), which supports prompt tuning, text prompts, and image prompts.
+`[2024-3-3]:` We add the **high-resolution YOLO-World**, which supports `1280x1280` resolution with higher accuracy and better performance on small objects!
+`[2024-2-29]:` We release the newest version of [**YOLO-World-v2**](./docs/updates.md) with higher accuracy and faster speed! We hope the community can join us to improve YOLO-World!
+`[2024-2-28]:` Excited to announce that YOLO-World has been accepted by **CVPR 2024**! We're continuing to make YOLO-World faster and stronger, as well as easier to use for everyone.
+`[2024-2-22]:` We sincerely thank [RoboFlow](https://roboflow.com/) and [@Skalskip92](https://twitter.com/skalskip92) for the [**Video Guide**](https://www.youtube.com/watch?v=X7gKBGVz4vs) about YOLO-World, nice work!
+`[2024-2-18]:` We thank [@Skalskip92](https://twitter.com/skalskip92) for developing the wonderful segmentation demo by connecting YOLO-World and EfficientSAM. You can try it now on [🤗 HuggingFace Spaces](https://huggingface.co/spaces/SkalskiP/YOLO-World).
+`[2024-2-17]:` The largest model, **X**, of YOLO-World is released, achieving better zero-shot performance!
+`[2024-2-17]:` We release the code & models for **YOLO-World-Seg** now! YOLO-World now supports open-vocabulary / zero-shot object segmentation!
+`[2024-2-15]:` The pre-trained YOLO-World-L with CC3M-Lite is released!
+`[2024-2-14]:` We provide [`image_demo`](demo.py) for inference on images or directories.
+`[2024-2-10]:` We provide the [fine-tuning](./docs/finetuning.md) and [data](./docs/data.md) details for fine-tuning YOLO-World on the COCO dataset or on custom datasets!
+`[2024-2-3]:` We now support a `Gradio` demo in the repo, so you can build the YOLO-World demo on your own device!
+`[2024-2-1]:` We've released the code and weights of YOLO-World!
+`[2024-2-1]:` We deployed the YOLO-World demo on [HuggingFace 🤗](https://huggingface.co/spaces/stevengrove/YOLO-World); you can try it now!
+`[2024-1-31]:` We are excited to launch **YOLO-World**, a cutting-edge real-time open-vocabulary object detector.
+
+
+## TODO
+
+YOLO-World is under active development, so please stay tuned ☕️!
+If you have suggestions 📃 or ideas 💡, **we would love for you to bring them up in the [Roadmap](https://github.com/AILab-CVC/YOLO-World/issues/109)** ❤️!
+
+## [FAQ (Frequently Asked Questions)](https://github.com/AILab-CVC/YOLO-World/discussions/149)
+
+We maintain an FAQ about YOLO-World in the GitHub Discussions. Please raise questions or share solutions you find during use there, and check it first to see whether your problem has already been answered.
+
+
+## Highlights & Introduction
+
+This repo contains the PyTorch implementation, pre-trained weights, and pre-training/fine-tuning code for YOLO-World.
+
+* YOLO-World is pre-trained on large-scale datasets, including detection, grounding, and image-text datasets.
+
+* YOLO-World is a next-generation YOLO detector with strong open-vocabulary detection capability and grounding ability.
+
+* YOLO-World presents a *prompt-then-detect* paradigm for efficient user-vocabulary inference, which re-parameterizes vocabulary embeddings as model parameters and achieves superior inference speed. You can export your own detection model without extra training or fine-tuning in our [online demo](https://huggingface.co/spaces/stevengrove/YOLO-World)! A minimal sketch of this idea follows the figure below.
+
+
+ +
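+To make the *prompt-then-detect* paradigm above concrete, here is a minimal, self-contained sketch (dummy random tensors only; not the repo's actual modules, weights, or API): the user vocabulary is encoded once into text embeddings that are cached as ordinary model parameters, and at detection time each region feature is classified by similarity against that cached vocabulary, so no text encoder has to run online.
+
+```python
+import torch
+import torch.nn.functional as F
+
+torch.manual_seed(0)
+
+# Offline step: encode the user vocabulary once. A real export would use the
+# CLIP text encoder; random vectors stand in for the embeddings here.
+vocab = ["person", "backpack", "coffee mug"]
+embed_dim = 512
+text_embeds = F.normalize(torch.randn(len(vocab), embed_dim), dim=-1)
+# After re-parameterization these embeddings live inside the detector as
+# ordinary parameters, so the text encoder is no longer needed at inference.
+
+# Online step: the image branch produces one feature per candidate region.
+region_feats = F.normalize(torch.randn(100, embed_dim), dim=-1)
+
+# Open-vocabulary classification is similarity against the cached vocabulary.
+logits = region_feats @ text_embeds.T   # (num_regions, num_prompts)
+scores, labels = logits.max(dim=-1)     # best-matching prompt per region
+print([(vocab[int(l)], round(float(s), 3)) for l, s in zip(labels[:5], scores[:5])])
+```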
+
+## Model Zoo
+
+We pre-trained YOLO-World-S/M/L from scratch and evaluated them on `LVIS val-1.0` and `LVIS minival`. We provide the pre-trained model weights and training logs for applications/research and for reproducing the results; a short download sketch follows the table below.
+
+### Zero-shot Inference on LVIS dataset
+
+
+ +| model | Pre-train Data | Size | APmini | APr | APc | APf | APval | APr | APc | APf | weights | +| :------------------------------------------------------------------------------------------------------------------- | :------------------- | :----------------- | :--------------: | :------------: | :------------: | :------------: | :-------------: | :------------: | :------------: | :------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| [YOLO-Worldv2-S](./configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 22.7 | 16.3 | 20.8 | 25.5 | 17.3 | 11.3 | 14.9 | 22.7 |[HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_s_obj365v1_goldg_pretrain-55b943ea.pth)| +| [YOLO-Worldv2-S](./configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py) | O365+GoldG | 1280🔸 | 24.1 | 18.7 | 22.0 | 26.9 | 18.8 | 14.1 | 16.3 | 23.8 |[HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_s_obj365v1_goldg_pretrain_1280ft-fc4ff4f7.pth)| +| [YOLO-Worldv2-M](./configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 30.0 | 25.0 | 27.2 | 33.4 | 23.5 | 17.1 | 20.0 | 30.1 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_m_obj365v1_goldg_pretrain-c6237d5b.pth)| +| [YOLO-Worldv2-M](./configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py) | O365+GoldG | 1280🔸 | 31.6 | 24.5 | 29.0 | 35.1 | 25.3 | 19.3 | 22.0 | 31.7 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_m_obj365v1_goldg_pretrain_1280ft-77d0346d.pth)| +| [YOLO-Worldv2-L](./configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 33.0 | 22.6 | 32.0 | 35.8 | 26.0 | 18.6 | 23.0 | 32.6 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_obj365v1_goldg_pretrain-a82b1fe3.pth)| +| [YOLO-Worldv2-L](./configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py) | O365+GoldG | 1280🔸 | 34.6 | 29.2 | 32.8 | 37.2 | 27.6 | 21.9 | 24.2 | 34.0 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_obj365v1_goldg_pretrain_1280ft-9babe3f6.pth)| +| [YOLO-Worldv2-L (CLIP-Large)](./configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) 🔥 | O365+GoldG | 640 | 34.0 | 22.0 | 32.6 | 37.4 | 27.1 | 19.9 | 23.9 | 33.9 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_clip_large_o365v1_goldg_pretrain-8ff2e744.pth)| +| [YOLO-Worldv2-L (CLIP-Large)](./configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_800ft_lvis_minival.py) 🔥 | O365+GoldG | 800🔸 | 35.5 | 28.3 | 33.2 | 38.8 | 28.6 | 22.0 | 25.1 | 35.4 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_clip_large_o365v1_goldg_pretrain_800ft-9df82e55.pth)| +| [YOLO-Worldv2-L](./configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG+CC3M-Lite | 640 | 32.9 | 25.3 | 31.1 | 35.8 | 26.1 | 20.6 | 22.6 | 32.3 | [HF 
Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_obj365v1_goldg_cc3mlite_pretrain-ca93cd1f.pth)| +| [YOLO-Worldv2-X](./configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG+CC3M-Lite | 640 | 35.4 | 28.7 | 32.9 | 38.7 | 28.4 | 20.6 | 25.6 | 35.0 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain-8698fbfa.pth) | +| 🔥 [YOLO-Worldv2-X]() | O365+GoldG+CC3M-Lite | 1280🔸 | 37.4 | 30.5 | 35.2 | 40.7 | 29.8 | 21.1 | 26.8 | 37.0 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth) | +| [YOLO-Worldv2-XL](./configs/pretrain/yolo_world_v2_xl_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG+CC3M-Lite | 640 | 36.0 | 25.8 | 34.1 | 39.5 | 29.1 | 21.1 | 26.3 | 35.8 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_xl_obj365v1_goldg_cc3mlite_pretrain-5daf1395.pth) | + + +
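+To fetch any of the checkpoints above programmatically, the short `huggingface_hub` sketch below may help; the repository id and the example filename are taken from the links in the table (swap in the file for the model you need), and having `huggingface_hub` installed is assumed.
+
+```python
+from huggingface_hub import hf_hub_download
+
+# Download one of the YOLO-World-v2 checkpoints listed in the table above.
+# The repo id comes from the huggingface.co/wondervictor/YOLO-World links;
+# replace `filename` with the checkpoint you actually want.
+ckpt_path = hf_hub_download(
+    repo_id="wondervictor/YOLO-World",
+    filename="yolo_world_v2_s_obj365v1_goldg_pretrain-55b943ea.pth",
+)
+print("checkpoint saved to:", ckpt_path)
+```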
+ +**NOTE:** +1. APmini: evaluated on LVIS `minival`. +3. APval: evaluated on LVIS `val 1.0`. +4. [HuggingFace Mirror](https://hf-mirror.com/) provides the mirror of HuggingFace, which is a choice for users who are unable to reach. +5. 🔸: fine-tuning models with the pre-trained data. + +**Pre-training Logs:** + +We provide the pre-training logs of `YOLO-World-v2`. Due to the unexpected errors of the local machines, the training might be interrupted several times. + +| Model | YOLO-World-v2-S | YOLO-World-v2-M | YOLO-World-v2-L | YOLO-World-v2-X | +| :--- | :-------------: | :--------------: | :-------------: | :-------------: | +|Pre-training Log | [Part-1](https://drive.google.com/file/d/1oib7pKfA2h1U_5-85H_s0Nz8jWd0R-WP/view?usp=drive_link), [Part-2](https://drive.google.com/file/d/11cZ6OZy80VTvBlZy3kzLAHCxx5Iix5-n/view?usp=drive_link) | [Part-1](https://drive.google.com/file/d/1E6vYSS8kBipGc8oQnsjAfeUAx8I9yOX7/view?usp=drive_link), [Part-2](https://drive.google.com/file/d/1fbM7vt2tgSeB8o_7tUDofWvpPNSViNj5/view?usp=drive_link) | [Part-1](https://drive.google.com/file/d/1Tola1QGJZTL6nGy3SBxKuknfNfREDm8J/view?usp=drive_link), [Part-2](https://drive.google.com/file/d/1mTBXniioUb0CdctCG4ckIU6idGo0NnH8/view?usp=drive_link) | [Final part](https://drive.google.com/file/d/1aEUA_EPQbXOrpxHTQYB6ieGXudb1PLpd/view?usp=drive_link)| + + +## Getting started + +### 1. Installation + +YOLO-World is developed based on `torch==1.11.0` `mmyolo==0.6.0` and `mmdetection==3.0.0`. Check more details about `requirements` and `mmcv` in [docs/installation](./docs/installation.md). + +#### Clone Project + +```bash +git clone --recursive https://github.com/AILab-CVC/YOLO-World.git +``` +#### Install + +```bash +pip install torch wheel -q +pip install -e . +``` + +### 2. Preparing Data + +We provide the details about the pre-training data in [docs/data](./docs/data.md). + + +## Training & Evaluation + +We adopt the default [training](./tools/train.py) or [evaluation](./tools/test.py) scripts of [mmyolo](https://github.com/open-mmlab/mmyolo). +We provide the configs for pre-training and fine-tuning in `configs/pretrain` and `configs/finetune_coco`. +Training YOLO-World is easy: + +```bash +chmod +x tools/dist_train.sh +# sample command for pre-training, use AMP for mixed-precision training +./tools/dist_train.sh configs/pretrain/yolo_world_l_t2i_bn_2e-4_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py 8 --amp +``` +**NOTE:** YOLO-World is pre-trained on 4 nodes with 8 GPUs per node (32 GPUs in total). For pre-training, the `node_rank` and `nnodes` for multi-node training should be specified. + +Evaluating YOLO-World is also easy: + +```bash +chmod +x tools/dist_test.sh +./tools/dist_test.sh path/to/config path/to/weights 8 +``` + +**NOTE:** We mainly evaluate the performance on LVIS-minival for pre-training. + +## Fine-tuning YOLO-World + +
+ +
+ + +
+

Choose your pre-trained YOLO-World and fine-tune it!

+
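+Whichever recipe below you choose, fine-tuning starts from one of the configs in [configs/finetune_coco](./configs/finetune_coco/). As a rough, hypothetical sketch (field names mirror the shipped configs that appear later in this diff, the checkpoint name is taken from the Model Zoo above, and every value is a placeholder to adapt rather than a recommendation), a child config for a custom dataset typically overrides fields like these:
+
+```python
+# Hypothetical mmengine-style child config; illustrative only, not an official recipe.
+_base_ = './yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py'
+
+# Start from a pre-trained checkpoint (see the Model Zoo above).
+load_from = 'pretrained_models/yolo_world_v2_l_obj365v1_goldg_pretrain-a82b1fe3.pth'
+
+# Override the class count everywhere the base config uses it.
+model = dict(
+    num_train_classes=80,
+    num_test_classes=80,
+    bbox_head=dict(head_module=dict(num_classes=80)),
+    train_cfg=dict(assigner=dict(num_classes=80)))
+
+# Point the text branch at your own class-name JSON; mmengine merges these
+# dicts into the dataloaders defined by the base config.
+train_dataloader = dict(
+    dataset=dict(class_text_path='data/texts/coco_class_texts.json'))
+val_dataloader = dict(
+    dataset=dict(class_text_path='data/texts/coco_class_texts.json'))
+```
+
+It can then be launched with the same `tools/dist_train.sh` pattern shown in the Training & Evaluation section above.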
+
+
+
+YOLO-World supports **zero-shot inference** and three types of **fine-tuning recipes**: **(1) normal fine-tuning**, **(2) prompt tuning**, and **(3) reparameterized fine-tuning**.
+
+* Normal Fine-tuning: we provide the details about fine-tuning YOLO-World in [docs/fine-tuning](./docs/finetuning.md).
+
+* Prompt Tuning: we provide more details about prompt tuning in [docs/prompt_yolo_world](./docs/prompt_yolo_world.md).
+
+* Reparameterized Fine-tuning: the reparameterized YOLO-World is better suited to specific domains far from generic scenes. You can find more details in [docs/reparameterize](./docs/reparameterize.md).
+
+## Deployment
+
+We provide the details about deployment for downstream applications in [docs/deployment](./docs/deploy.md).
+You can directly download the ONNX model through the online [demo](https://huggingface.co/spaces/stevengrove/YOLO-World) in HuggingFace Spaces 🤗.
+
+- [x] ONNX export and demo: [docs/deploy](https://github.com/AILab-CVC/YOLO-World/blob/master/docs/deploy.md)
+- [x] TFLite and INT8 Quantization: [docs/tflite_deploy](https://github.com/AILab-CVC/YOLO-World/blob/master/docs/tflite_deploy.md)
+- [ ] TensorRT: coming soon.
+- [ ] C++: coming soon.
+
+## Demo
+
+See [`demo`](./demo) for more details.
+
+- [x] `gradio_demo.py`: Gradio demo with ONNX export.
+- [x] `image_demo.py`: inference on images or a directory of images.
+- [x] `simple_demo.py`: a simple demo of YOLO-World that takes an image `array` (instead of a path) as input.
+- [x] `video_demo.py`: run YOLO-World inference on videos.
+- [x] `inference.ipynb`: Jupyter notebook for YOLO-World.
+- [x] [Google Colab Notebook](https://colab.research.google.com/drive/1F_7S5lSaFM06irBCZqjhbN7MpUXo6WwO?usp=sharing): We sincerely thank [Onuralp](https://github.com/onuralpszr) for sharing the [Colab Demo](https://colab.research.google.com/drive/1F_7S5lSaFM06irBCZqjhbN7MpUXo6WwO?usp=sharing); give it a try 😊!
+
+## Acknowledgement
+
+We sincerely thank [mmyolo](https://github.com/open-mmlab/mmyolo), [mmdetection](https://github.com/open-mmlab/mmdetection), [GLIP](https://github.com/microsoft/GLIP), and [transformers](https://github.com/huggingface/transformers) for providing their wonderful code to the community!
+
+## Citations
+If you find YOLO-World useful in your research or applications, please consider giving us a star 🌟 and citing it.
+
+```bibtex
+@inproceedings{Cheng2024YOLOWorld,
+  title={YOLO-World: Real-Time Open-Vocabulary Object Detection},
+  author={Cheng, Tianheng and Song, Lin and Ge, Yixiao and Liu, Wenyu and Wang, Xinggang and Shan, Ying},
+  booktitle={Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR)},
+  year={2024}
+}
+```
+
+## License
+YOLO-World is released under the GPL-v3 License and supports commercial usage. If you need a commercial license for YOLO-World, please feel free to contact us.
diff --git a/models/YOLO-World/configs/finetune_coco/README.md b/models/YOLO-World/configs/finetune_coco/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..954d64a5f593f0f984f6ddcf5c6cd96168b7179e
--- /dev/null
+++ b/models/YOLO-World/configs/finetune_coco/README.md
@@ -0,0 +1,29 @@
+## Fine-tune YOLO-World on MS-COCO
+
+
+### Updates
+
+1. [2024-3-27]: Considering that fine-tuning YOLO-World on COCO **without `mask-refine`** yields noticeably worse results (e.g., YOLO-World-L reaches 48.6 AP without `mask-refine` vs. 53.3 AP with it), we rethink the training process and explore new training schemes for fine-tuning without `mask-refine`.
+BTW, the COCO fine-tuning results are updated with higher performance (with `mask-refine`)! + + +### COCO Results and Checkpoints + +**NOTE:** +1. APZS: AP evaluated in the zero-shot setting (w/o fine-tuning on COCO dataset). +2. `mask-refine`: refine the box annotations with masks, and add `CopyPaste` augmentation during training. + +| model | Schedule | `mask-refine` | efficient neck | APZS| AP | AP50 | AP75 | weights | log | +| :---- | :-------: | :----------: |:-------------: | :------------: | :-: | :--------------:| :-------------: |:------: | :-: | +| [YOLO-World-v2-S](./yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) | AdamW, 2e-4, 80e | ✔️ | ✖️ | 37.5 | 46.1 | 62.0 | 49.9 | [HF Checkpoints](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_ep80-492dc329.pth) | [log](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_20240327_110411.log) | +| [YOLO-World-v2-M](./yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) | AdamW, 2e-4, 80e | ✔️ | ✖️ | 42.8 | 51.0 | 67.5 | 55.2 | [HF Checkpoints](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_ep80-69c27ac7.pth) | [log](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_20240327_110411.log) | +| [YOLO-World-v2-L](./yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) | AdamW, 2e-4, 80e | ✔️ | ✖️ | 45.1 | 53.9 | 70.9 | 58.8 | [HF Checkpoints](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_ep80-81c701ee.pth) | [log](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_20240326_160313.log) | +| [YOLO-World-v2-X](./yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) | AdamW, 2e-4, 80e | ✔️ | ✖️ | 46.8 | 54.7 | 71.6 | 59.6 | [HF Checkpoints](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_ep80-76bc0cbd.pth) | [log](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco_20240322_181232.log) | +| [YOLO-World-v2-L](./yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetune_coco.py) 🔥 | SGD, 1e-3, 40e | ✖️ | ✖️ | 45.1 | 52.8 | 69.5 | 57.8 | [HF Checkpoints](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetune_coco_ep80-e1288152.pth) | [log](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetuning_coco_20240327_014902.log) | + + +### Reparameterized Training + +| model | Schedule | `mask-refine` | efficient neck | APZS| AP | AP50 | AP75 | weights | log | +| :---- | :-------: | :----------: |:-------------: | :------------: | :-: | :--------------:| :-------------: |:------: | :-: | +| [YOLO-World-v2-S](./yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) | AdamW, 2e-4, 80e | ✔️ | ✖️ | 37.5 | 46.3 | 62.8 | 50.4 | [HF Checkpoints]() | [log]() | \ No newline at end of file diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py new 
file mode 100644 index 0000000000000000000000000000000000000000..101a571dbf6a6c79d50c37dff98a2ac0698e91b7 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py @@ -0,0 +1,179 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from='pretrained_models/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + 
batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2ddbe50d4c63d7cd5953f9f096b57661ccb2f287 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,181 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from='pretrained_models/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + 
type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + 
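+        # swap to train_pipeline_stage2 (mosaic/mix-up removed) for the last close_mosaic_epochs epochs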
switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b5cdca5069b2a76915b80d64e02fc0a44840899e --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,159 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth' +# huggingface text model +text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='EfficientCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + 
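+        # rotation and shear are disabled; scaling is jittered within ±affine_scale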
max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *text_transform +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +coco_train_dataset = dict(_delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, + min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..275311837444f5f56f823825b98f474d211c7519 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_efficient_neck_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,182 @@ 
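+# YOLO-World-v2 (L) with the lighter EfficientCSPLayerWithTwoConv neck; COCO fine-tuning
+# follows the same 80-epoch mask-refine recipe as the other variants in this folder.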
+_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='EfficientCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + 
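+    # coco_train_dataset pairs YOLOv5CocoDataset with the class texts in data/texts/coco_class_texts.json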
dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d01affe6182e271da2057bbd32b15f43200fd5c6 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,181 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + 
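+        # {{_base_.model.backbone}} is expanded by MMEngine to the YOLOv8-L image backbone of the base config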
image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - 
close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3978469fccb003aa02e6a39da38d1e6007706253 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_40e_8gpus_finetune_coco.py @@ -0,0 +1,160 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 40 # Maximum training epochs +close_mosaic_epochs = 30 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 1e-3 +weight_decay = 0.0005 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict(type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict(type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - 
_base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *text_transform +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] + +coco_train_dataset = dict(_delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, + min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='SGD', + lr=base_lr, + momentum=0.937, + nesterov=True, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..23608ba56c47df3bf19d5ddcaa12cf70d4f15ee6 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,161 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') 
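+# importing the yolo_world package registers its custom modules, so the type strings below resolve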
+custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 1e-3 +weight_decay = 0.0005 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict(type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict(type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *text_transform +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +coco_train_dataset = dict(_delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, + min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 
'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='SGD', + lr=base_lr, + momentum=0.937, + nesterov=True, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..32fcc51cdffc459a3d11461174a989e6e3438688 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_m_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,182 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_m_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_train-c6237d5b.pth' +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + 
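+          # 512-d text embeddings guide the PAFPN through the MaxSigmoid CSP blocks configured below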
guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + 
_base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..498011019471f55cf525802c44bbc865f9a67655 --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,145 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = '../FastDet/output_models/pretrain_yolow-v8_s_clipv2_frozen_te_noprompt_t2i_bn_2e-3adamw_scale_lr_wd_32xb16-100e_obj365v1_goldg_cc3mram250k_train_lviseval-e3592307_rep_conv.pth' +persistent_workers = False +mixup_prob = 0.15 +copypaste_prob = 0.3 + +# model settings +model = dict(type='SimpleYOLOWorldDetector', + mm_neck=True, + num_train_classes=num_classes, + num_test_classes=num_classes, + reparameterized=True, + data_preprocessor=dict(type='YOLOv5DetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + text_model=None, + image_model={{_base_.model.backbone}}, + with_text_model=False), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='EfficientCSPLayerWithTwoConv')), + bbox_head=dict(head_module=dict(type='RepYOLOWorldHeadModule', + embed_dims=text_channels, + num_guide=num_classes, + num_classes=num_classes)), + train_cfg=dict(assigner=dict(num_classes=num_classes))) + +# dataset settings +final_transform = [ + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] +mosaic_affine_transform = [ + dict(type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MixUp', + prob=mixup_prob, + 
pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *final_transform +] + +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *final_transform] + +coco_train_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] +coco_val_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=test_pipeline) + +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9fe682c87edc1c1c7c8e6d10f2c08e5f819b501f --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,146 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 
'../FastDet/output_models/yolo_world_s_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_train-55b943ea_rep_conv.pth' +persistent_workers = False +mixup_prob = 0.15 +copypaste_prob = 0.3 + +# model settings +model = dict(type='SimpleYOLOWorldDetector', + mm_neck=True, + num_train_classes=num_classes, + num_test_classes=num_classes, + reparameterized=True, + data_preprocessor=dict(type='YOLOv5DetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + text_model=None, + image_model={{_base_.model.backbone}}, + with_text_model=False), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=num_classes, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='RepConvMaxSigmoidCSPLayerWithTwoConv', + guide_channels=num_classes)), + bbox_head=dict(head_module=dict(type='RepYOLOWorldHeadModule', + embed_dims=text_channels, + num_guide=num_classes, + num_classes=num_classes)), + train_cfg=dict(assigner=dict(num_classes=num_classes))) + +# dataset settings +final_transform = [ + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] +mosaic_affine_transform = [ + dict(type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *final_transform +] + +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *final_transform] + +coco_train_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] +coco_val_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=test_pipeline) + +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), 
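+    # the second hook drops mosaic/mix-up for the final close_mosaic_epochs epochs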
+ dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..49d2e4bdffd9cb561399d694196b885be3524efe --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_s_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,184 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_s_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_train-55b943ea.pth' +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False +mixup_prob = 0.15 +copypaste_prob = 0.3 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( + persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce88a89ab9ea18b0f91760d985f2313859c8762 --- /dev/null +++ 
b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_x_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,183 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict( + imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_x_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc250k_train_lviseval-8698fbfa.pth' +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict( + type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, + *mosaic_affine_transform, + dict( + type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, + *mosaic_affine_transform]), + *_base_.last_transform[:-1], + *text_transform +] +train_pipeline_stage2 = [ + *_base_.train_pipeline_stage2[:-1], + *text_transform +] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict( 
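+    # the X variant keeps the same per-GPU batch size of 16 as the smaller models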
+ persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict( + param_scheduler=dict( + scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict( + max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict( + custom_keys={'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0)}), + constructor='YOLOWv5OptimizerConstructor') +# evaluation settings +val_evaluator = dict( + _delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/finetune_coco/yolo_world_v2_xl_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_xl_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4dc856605e8a38ef4d8d924a06b4ec13b0a7333b --- /dev/null +++ b/models/YOLO-World/configs/finetune_coco/yolo_world_v2_xl_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py @@ -0,0 +1,173 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# scaling model from X to XL +deepen_factor = 1.0 +widen_factor = 1.5 + +backbone = _base_.model.backbone +backbone.update(deepen_factor=deepen_factor, widen_factor=widen_factor) + +# model settings +model = dict(type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + 
num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + image_model=backbone, + text_model=dict(type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + widen_factor=widen_factor, + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, *mosaic_affine_transform]), + *_base_.last_transform[:-1], *text_transform +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +coco_train_dataset = dict(_delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, + min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + 
ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_800ft_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_800ft_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..16067a6880b0e21f0b6ec06c98cf02626bec552e --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_800ft_lvis_minival.py @@ -0,0 +1,200 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 768 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.0125 +train_batch_size_per_gpu = 16 +# text_model_name = '../pretrained_models/clip-vit-large-patch14-336' +text_model_name = 'openai/clip-vit-large-patch14-336' +img_scale = (800, 800) + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=img_scale, + 
pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] + +train_pipeline_stage2 = [ + *_base_.pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform +] + +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + 
switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b84ab7a6724d25a0fb0678ed0f2f5f566afb1a --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_clip_large_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,171 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 768 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.0125 +train_batch_size_per_gpu = 16 +# text_model_name = '../pretrained_models/clip-vit-large-patch14-336' +text_model_name = 'openai/clip-vit-large-patch14-336' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = 
[*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..1c34f3a4c99d2676f98bdacd02561c3b7896ae36 --- /dev/null +++ 
b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py @@ -0,0 +1,202 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 20 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.025 +train_batch_size_per_gpu = 4 +load_from = "pretrained_models/yolo_world_v2_l_obj365v1_goldg_pretrain-a82b1fe3.pth" +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +img_scale = (1280, 1280) + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] + +train_pipeline_stage2 = [ + *_base_.pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform +] + +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = 
dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) + +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..cb8beec0af6f0fc4b0642f2f6fca4462e44eae60 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,171 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters 
+num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + 
*_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py new file mode 100644 index 0000000000000000000000000000000000000000..70b19b287e03ea84131f9b8911b761f7eeaaa77e --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py @@ -0,0 +1,171 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + 
block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + 
dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..9a430e8499aa329799cb0c3edd59df21e2c42f9c --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py @@ -0,0 +1,198 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_m_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +img_scale = (1280, 1280) + +# text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 
114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] + +train_pipeline_stage2 = [ + *_base_.pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform +] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + 
batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..c8ce8129cfd6e08f7efdc09159f86d0f22351aa8 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,171 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_m_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +text_model_name = 'openai/clip-vit-large-patch14-336' +# text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + 
class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_noeinsum_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_noeinsum_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..264b026ca780dffc91236cf53908858488337541 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_m_vlpan_bn_noeinsum_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,176 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_m_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs 
+close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv', + use_einsum=False)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes, + use_einsum=False)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] + +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] + +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + 
dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..3afb76aa8aab584a99c623926b5a363c1a453d89 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py @@ -0,0 +1,195 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 +weight_decay = 0.025 +train_batch_size_per_gpu = 4 +img_scale = (1280, 1280) + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + 
head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [ + *_base_.pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform +] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = 
dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..775cc8e7867e60f4fad8d24d492d982b3463e697 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_s_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,170 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + 
dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') 
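The `optim_wrapper` that closes the config above leaves the actual parameter grouping to `YOLOWv5OptimizerConstructor`, driven by `paramwise_cfg`. As a rough illustration (not part of the config), the sketch below spells out the effective per-group hyper-parameters those keys imply: the CLIP text backbone is keyed to a 100x smaller learning rate (largely moot here since `frozen_modules=['all']` freezes it, but relevant if the text encoder is unfrozen), while `logit_scale`, biases, and normalization layers are exempt from weight decay.

```python
# Illustrative only: effective per-group settings implied by the paramwise_cfg
# above. The real grouping is done by YOLOWv5OptimizerConstructor at runtime.
base_lr = 2e-3
weight_decay = 0.05 / 2  # 0.025

effective_groups = {
    'backbone.text_model': {'lr': base_lr * 0.01, 'weight_decay': weight_decay},  # lr_mult=0.01 -> 2e-5
    'logit_scale': {'lr': base_lr, 'weight_decay': 0.0},          # weight_decay=0.0
    'bias_and_norm': {'lr': base_lr, 'weight_decay': 0.0},        # bias/norm decay_mult=0.0
    'default': {'lr': base_lr, 'weight_decay': weight_decay},
}

for name, group in effective_groups.items():
    print(f"{name}: lr={group['lr']:.0e}, weight_decay={group['weight_decay']}")
```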
diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..ab4cd23f4628950bd31b01422f92a0a3ee50c683 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py @@ -0,0 +1,199 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +img_scale = (1280, 1280) + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [ + *_base_.pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform +] + +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + 
type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c1226d6f10bd785a03eeccf1a669f9f6531062 --- /dev/null +++ 
b/models/YOLO-World/configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,171 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + 
pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain/yolo_world_v2_xl_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain/yolo_world_v2_xl_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..35977e8ed93ee68ef96a9a2b98ebe02d4c18abf8 --- /dev/null +++ b/models/YOLO-World/configs/pretrain/yolo_world_v2_xl_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,185 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' + +# scaling model from X to XL +deepen_factor = 1.0 +widen_factor = 1.5 + +backbone = _base_.model.backbone +backbone.update( + deepen_factor=deepen_factor, + widen_factor=widen_factor +) + +# model settings +model = dict( + 
type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model=backbone, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + widen_factor=widen_factor, + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + 
class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain_v1/README.md b/models/YOLO-World/configs/pretrain_v1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3290c7e0f7ab6b3bd10dd5b0ecaa5371d723f915 --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/README.md @@ -0,0 +1,21 @@ +## Pre-training YOLO-World-v1 + +> The YOLO-World-v1 is an initial version, and now is nearly deprecated! We strongly suggest you use the [latest version](../pretrain/). 
+
+
+
+### Zero-shot Inference on LVIS dataset
+
+| model | Pre-train Data | Size | APmini | APr | APc | APf | APval | APr | APc | APf | weights |
+| :--- | :--- | :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| [YOLO-World-S](./yolo_world_s_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 24.3 | 16.6 | 22.1 | 27.7 | 17.8 | 11.0 | 14.8 | 24.0 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/resolve/main/yolo_world_s_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-18bea4d2.pth) |
+| [YOLO-World-M](./yolo_world_m_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 28.6 | 19.7 | 26.6 | 31.9 | 22.3 | 16.2 | 19.0 | 28.7 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/resolve/main/yolo_world_m_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-2b7bd1be.pth) |
+| [YOLO-World-L](./yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG | 640 | 32.5 | 22.3 | 30.6 | 36.1 | 24.8 | 17.8 | 22.4 | 32.5 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/resolve/main/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth) |
+| [YOLO-World-L](./yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG+CC3M-Lite | 640 | 33.0 | 23.6 | 32.0 | 35.5 | 25.3 | 18.0 | 22.1 | 32.1 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_cc3mlite_train_pretrained-7a5eea3b.pth) |
+| [YOLO-World-X](./yolo_world_x_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) | O365+GoldG+CC3M-Lite | 640 | 33.4 | 24.4 | 31.6 | 36.6 | 26.6 | 19.2 | 23.5 | 33.2 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_x_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_cc3mlite_train_pretrained-8cf6b025.pth) |
+
+
+**NOTE:**
+1. APmini: evaluated on LVIS `minival`.
+2. APval: evaluated on LVIS `val 1.0`.
+3. [HuggingFace Mirror](https://hf-mirror.com/) provides a mirror of HuggingFace for users who cannot access huggingface.co directly.
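The weights in the table above are standard mmengine checkpoints, so the APmini column can in principle be reproduced by pointing mmengine's `Runner` at one of these configs plus the downloaded weight file. The snippet below is a minimal sketch rather than an official entry point: it assumes it is run from the YOLO-World repository root (so the `yolo_world` package declared in `custom_imports` resolves and the LVIS annotations are prepared under `data/coco/lvis/`), and the `work_dirs`/`checkpoints` paths are placeholders.

```python
# Minimal sketch (assumed paths): zero-shot LVIS minival evaluation for
# YOLO-World-L (v1), mirroring what the standard mmdet/mmyolo tools/test.py does.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'configs/pretrain_v1/'
    'yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py')
cfg.work_dir = 'work_dirs/yolo_world_v1_l_lvis_minival'  # placeholder output dir
cfg.load_from = ('checkpoints/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_'
                 '32xb16_100e_o365_goldg_train_pretrained-0e566235.pth')  # from the table

runner = Runner.from_cfg(cfg)
runner.test()  # runs the mmdet.LVISMetric bbox evaluation defined in the config
```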
\ No newline at end of file diff --git a/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..e88be2eb6f54cb19d066974548ea08239ac4127f --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,172 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', 
+ data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py b/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py new file mode 100644 index 0000000000000000000000000000000000000000..66333b10916d3e971d4d3c9e968ab91b48f28022 --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py @@ -0,0 +1,172 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 
+train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + 
data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain_v1/yolo_world_m_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain_v1/yolo_world_m_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..18c3be69dca57df960b428e46800fb7543d2c1da --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/yolo_world_m_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,172 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_m_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + 
max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + 
batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain_v1/yolo_world_s_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain_v1/yolo_world_s_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..5441d0ff995889f9ebcef97c853daf69dbbc4564 --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/yolo_world_s_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,172 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_s_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + 
class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/pretrain_v1/yolo_world_x_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/models/YOLO-World/configs/pretrain_v1/yolo_world_x_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..f20d3f01cb6f3a301e726ed2d3f8e7b32b61f50f --- /dev/null +++ b/models/YOLO-World/configs/pretrain_v1/yolo_world_x_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,172 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_x_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs 
+close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 
'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/models/YOLO-World/configs/prompt_tuning_coco/READEME.md b/models/YOLO-World/configs/prompt_tuning_coco/READEME.md new file mode 100644 index 0000000000000000000000000000000000000000..2888d1bf2ecb14d8f5d903d6aa0be38006bae204 --- /dev/null +++ b/models/YOLO-World/configs/prompt_tuning_coco/READEME.md @@ -0,0 +1,12 @@ +## Prompt Tuning for YOLO-World + +### NOTE: + +This folder contains many experimental config files, which will be removed later!! 
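+
+These prompt-tuning configs replace the online CLIP text encoder with precomputed per-class embeddings that are then tuned: `SimpleYOLOWorldDetector` is initialized from an embedding file (`embedding_path='embeddings/clip_vit_b32_coco_80_embeddings.npy'`, `num_prompts=80`, `prompt_dim=text_channels` = 512). Below is a rough, hedged sketch of how such a file could be produced with Hugging Face `transformers`; the prompt templates, the L2 normalization, and the `(80, 512)` array layout are assumptions for illustration, not something these configs document:
+
+```python
+# Hedged sketch: precompute CLIP ViT-B/32 text embeddings for the COCO class names.
+import os
+import numpy as np
+import torch
+from transformers import CLIPModel, CLIPTokenizer
+
+coco_classes = ['person', 'bicycle', 'car']  # ...extend to all 80 COCO class names
+
+model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').eval()
+tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
+
+inputs = tokenizer(coco_classes, padding=True, return_tensors='pt')
+with torch.no_grad():
+    text_feats = model.get_text_features(**inputs)              # (num_classes, 512)
+text_feats = torch.nn.functional.normalize(text_feats, dim=-1)  # unit-norm rows (assumed)
+
+os.makedirs('embeddings', exist_ok=True)
+np.save('embeddings/clip_vit_b32_coco_80_embeddings.npy', text_feats.numpy())
+```
+
+The prompt-tuning configs in this folder then keep the image backbone, neck, and detection head frozen and optimize only these embeddings, while the all-finetuning variant does the opposite (frozen prompts, trainable network).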
+ +### Experimental Results + +| Model | Config | AP | AP50 | AP75 | APS | APM | APL | +| :---- | :----: | :--: | :--: | :---: | :-: | :-: | :-: | +| YOLO-World-v2-L | Zero-shot | 45.7 | 61.6 | 49.8 | 29.9 | 50.0 | 60.8 | +| [YOLO-World-v2-L](./../configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py) | Prompt tuning | 47.9 | 64.3 | 52.5 | 31.9 | 52.6 | 61.3 | diff --git a/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3212fa05005d31c01823e103b65792832c4342da --- /dev/null +++ b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py @@ -0,0 +1,161 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +persistent_workers = False + +# model settings +model = dict(type='SimpleYOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + embedding_path='embeddings/clip_vit_b32_coco_80_embeddings.npy', + prompt_dim=text_channels, + num_prompts=80, + data_preprocessor=dict(type='YOLOv5DetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + text_model=None, + image_model={{_base_.model.backbone}}, + frozen_stages=4, + with_text_model=False), + neck=dict(type='YOLOWorldPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + freeze_all=True, + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +final_transform = [ + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] +mosaic_affine_transform = [ + dict(type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MixUp', + prob=_base_.mixup_prob, + pre_transform=[*_base_.pre_transform, 
*mosaic_affine_transform]), + *_base_.last_transform[:-1], *final_transform +] + +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *final_transform] + +coco_train_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] +coco_val_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=test_pipeline) + +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'embeddings': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') +find_unused_parameters = True diff --git a/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_prompt_tuning_coco.py b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_prompt_tuning_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..64ce89d13a436d4aa04ed057b60f4586f8b350da --- /dev/null +++ b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_prompt_tuning_coco.py @@ -0,0 +1,117 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] 
+base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +persistent_workers = False + +# model settings +model = dict(type='SimpleYOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + embedding_path='embeddings/clip_vit_b32_coco_80_embeddings.npy', + prompt_dim=text_channels, + num_prompts=80, + freeze_prompt=False, + data_preprocessor=dict(type='YOLOv5DetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + text_model=None, + image_model={{_base_.model.backbone}}, + frozen_stages=4, + with_text_model=False), + neck=dict(type='YOLOWorldPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + freeze_all=True, + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +coco_train_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +coco_val_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.test_pipeline) + +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=_base_.train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) + +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'embeddings': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +val_evaluator = dict(_delete_=True, + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox') diff --git a/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_all_finetuning_coco.py b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_all_finetuning_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..8caf5bd70769bfb5a728bb8c6d35448dd1ff9454 --- /dev/null +++ b/models/YOLO-World/configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_sgd_1e-3_80e_8gpus_all_finetuning_coco.py @@ -0,0 +1,109 @@ +_base_ = ('../../third_party/mmyolo/configs/yolov8/' + 'yolov8_l_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) + +# hyper-parameters +num_classes = 80 +num_training_classes = 80 +max_epochs = 40 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 1e-3 +weight_decay = 0.0005 +train_batch_size_per_gpu = 16 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +persistent_workers = False + +# model settings +model = dict(type='SimpleYOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + embedding_path='embeddings/clip_vit_b32_coco_80_embeddings.npy', + prompt_dim=text_channels, + num_prompts=80, + freeze_prompt=True, + data_preprocessor=dict(type='YOLOv5DetDataPreprocessor'), + backbone=dict(_delete_=True, + type='MultiModalYOLOBackbone', + text_model=None, + image_model={{_base_.model.backbone}}, + with_text_model=False), + neck=dict(type='YOLOWorldPAFPN', + freeze_all=False, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict( + type='YOLOWorldHeadModule', + freeze_all=False, + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +coco_train_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline) + +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +coco_val_dataset = dict(type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.test_pipeline) + +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=_base_.train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) + +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='SGD', + lr=base_lr, + momentum=0.937, + nesterov=True, + weight_decay=weight_decay, + 
batch_size_per_gpu=train_batch_size_per_gpu))
+
+# evaluation settings
+val_evaluator = dict(_delete_=True,
+ type='mmdet.CocoMetric',
+ proposal_nums=(100, 1, 10),
+ ann_file='data/coco/annotations/instances_val2017.json',
+ metric='bbox')
diff --git a/models/YOLO-World/configs/segmentation/README.md b/models/YOLO-World/configs/segmentation/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8cfd30341ae20ab7ff9a24fb4df03825d29f0520
--- /dev/null
+++ b/models/YOLO-World/configs/segmentation/README.md
@@ -0,0 +1,27 @@
+## Fine-tuning YOLO-World for Instance Segmentation
+
+
+### Models
+
+We fine-tune YOLO-World on LVIS (`LVIS-Base`) with mask annotations for open-vocabulary (zero-shot) instance segmentation.
+
+We provide two strategies for fine-tuning YOLO-World towards open-vocabulary instance segmentation (see the inspection sketch after the notes below):
+
+* fine-tuning `all modules`: leads to better LVIS segmentation accuracy but affects the zero-shot performance.
+
+* fine-tuning the `segmentation head`: maintains the zero-shot performance but lowers LVIS segmentation accuracy.
+
+| Model | Fine-tuning Data | Fine-tuning Modules | APmask | APr | APc | APf | Weights |
+| :---- | :--------------- | :----------------: | :--------------: | :------------: | :------------: | :------------: | :-----: |
+| [YOLO-World-Seg-M](./yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py) | `LVIS-Base` | `all modules` | 25.9 | 13.4 | 24.9 | 32.6 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis-ca465825.pth) |
+| [YOLO-World-v2-Seg-M](./yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py) | `LVIS-Base` | `all modules` | 25.9 | 13.4 | 24.9 | 32.6 | [HF Checkpoints 🤗]() |
+| [YOLO-World-Seg-L](./yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py) | `LVIS-Base` | `all modules` | 28.7 | 15.0 | 28.3 | 35.2 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis-8c58c916.pth) |
+| [YOLO-World-v2-Seg-L](./yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py) | `LVIS-Base` | `all modules` | 28.7 | 15.0 | 28.3 | 35.2 | [HF Checkpoints 🤗]() |
+| [YOLO-World-Seg-M](./yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py) | `LVIS-Base` | `seg head` | 16.7 | 12.6 | 14.6 | 20.8 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis-7bca59a7.pth) |
+| [YOLO-World-v2-Seg-M](./yolo_world_v2_seg_m_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py) | `LVIS-Base` | `seg head` | 17.8 | 13.9 | 15.5 | 22.0 | [HF Checkpoints 🤗]() |
+| [YOLO-World-Seg-L](./yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py) | `LVIS-Base` | `seg head` | 19.1 | 14.2 | 17.2 | 23.5 | [HF Checkpoints 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis-5a642d30.pth) |
+| [YOLO-World-v2-Seg-L](./yolo_world_v2_seg_l_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py) | `LVIS-Base` | `seg head` | 19.8 | 17.2 | 17.5 | 23.6 | [HF Checkpoints 🤗]() |
+
+**NOTE:**
+1. The mask AP is evaluated on the LVIS `val 1.0` set.
+2. All models are fine-tuned for 80 epochs on `LVIS-Base` (866 categories, `common + frequent`).
+3. A YOLO-World-Seg model with only the `seg head` fine-tuned maintains the original zero-shot detection capability while also segmenting objects.
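+
+As a quick way to see how the `seg head` strategy is wired in the configs added below, the following sketch loads one of them with `mmengine` and prints the freeze-related fields. This is only an illustrative sketch, not part of the official tooling; it assumes `mmengine` is installed, that the working directory is the YOLO-World root, and that the `_base_` mmyolo configs under `third_party/` resolve on your machine:
+
+```python
+# Hedged inspection sketch: which modules does the seg-head fine-tuning config freeze?
+from mmengine.config import Config
+
+cfg = Config.fromfile(
+    'configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py')
+
+print(cfg.model.backbone.frozen_stages)              # 4      -> image backbone frozen
+print(cfg.model.backbone.text_model.frozen_modules)  # ['all'] -> CLIP text encoder frozen
+print(cfg.model.neck.freeze_all)                     # True   -> PAFPN neck frozen
+print(cfg.model.bbox_head.head_module.freeze_bbox)   # True   -> box/cls branches frozen
+print(cfg.optim_wrapper.paramwise_cfg.custom_keys)   # lr_mult=0.0 for neck and detection heads
+```
+
+Under these settings only the mask prototype and mask-coefficient branches of `YOLOWorldSegHead` receive gradient updates, which is consistent with note 3 above.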
diff --git a/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..01885dd5461359eb0dd026886268b28449dc6a25 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py @@ -0,0 +1,227 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth' +persistent_workers = False +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 'openai/clip-vit-base-patch32' +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=[])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256), + mask_overlap=mask_overlap, + loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict(num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 
'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + 
class_text_path='data/captions/lvis_v1_class_captions.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..5d4174ab893a289b9f75499f1fe11e43b638ab41 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_seg_l_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py @@ -0,0 +1,237 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth' +persistent_workers = False + +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + frozen_stages=4, # frozen the image backbone + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256, + freeze_bbox=True), + mask_overlap=mask_overlap, + loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict(num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + 
downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'neck': 
+ dict(lr_mult=0.0), + 'head.head_module.reg_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_contrasts': + dict(lr_mult=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/captions/lvis_v1_class_captions.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..31331b663551f8f74af41d7efa6f9534dedf9738 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_allmodules_finetune_lvis.py @@ -0,0 +1,226 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_m_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-2b7bd1be.pth' +persistent_workers = False + +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=[])), + neck=dict(type='YOLOWorldDualPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256), + mask_overlap=mask_overlap, + loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict(num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + 
with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, 
+ dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/captions/lvis_v1_class_captions.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..883c3225d4e1bbfbcefe96f9028b9082324c2466 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_seg_m_dual_vlpan_2e-4_80e_8gpus_seghead_finetune_lvis.py @@ -0,0 +1,237 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_m_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-2b7bd1be.pth' +persistent_workers = False + +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + frozen_stages=4, # frozen the image backbone + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldDualPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + text_enhancder=dict(type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8)), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256, + freeze_bbox=True), + mask_overlap=mask_overlap, + 
loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict(num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + 
interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'neck': + dict(lr_mult=0.0), + 'head.head_module.reg_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_contrasts': + dict(lr_mult=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/captions/lvis_v1_class_captions.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_l_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_l_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..062c9e31ed02a1ab84a68f59ca1e5f86a389a2d6 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_l_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py @@ -0,0 +1,239 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_l_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_cc3mlite_train-ca93cd1f.pth' +# text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +text_model_name = 'openai/clip-vit-base-patch32' +persistent_workers = False + +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + frozen_stages=4, # frozen the image backbone + text_model=dict( + 
type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256, + freeze_bbox=True), + mask_overlap=mask_overlap, + loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict( + type='YOLOWorldSegAssigner', + num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + 
pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'neck': + dict(lr_mult=0.0), + 'head.head_module.reg_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_contrasts': + dict(lr_mult=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_m_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py b/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_m_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..d196d4ee1956d8c94bfcef1ad6da10f6b9af39b8 --- /dev/null +++ b/models/YOLO-World/configs/segmentation/yolo_world_v2_seg_m_vlpan_bn_2e-4_80e_8gpus_seghead_finetune_lvis.py @@ -0,0 +1,239 @@ +_base_ = ( + '../../third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py' +) +custom_imports = dict(imports=['yolo_world'], allow_failed_imports=False) +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 80 # Maximum training epochs +close_mosaic_epochs = 10 +save_epoch_intervals = 5 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-4 + +weight_decay = 0.05 +train_batch_size_per_gpu = 8 +load_from = 'pretrained_models/yolo_world_m_clip_t2i_bn_2e-3adamw_32xb16-100e_obj365v1_goldg_train-c6237d5b.pth' +text_model_name = '../pretrained_models/clip-vit-base-patch32-projection' +# text_model_name = 
'openai/clip-vit-base-patch32' +persistent_workers = False + +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = False +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + frozen_stages=4, # frozen the image backbone + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name=text_model_name, + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + freeze_all=True, + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldSegHead', + head_module=dict(type='YOLOWorldSegHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes, + mask_channels=32, + proto_channels=256, + freeze_bbox=True), + mask_overlap=mask_overlap, + loss_mask=dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=1.0), + train_cfg=dict(assigner=dict( + type='YOLOWorldSegAssigner', + num_classes=num_training_classes)), + test_cfg=dict(mask_thr_binary=0.5, fast_test=True)) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=True) +] + +last_transform = [ + dict(type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict(type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', + 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), +] + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +mosaic_affine_transform = [ + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=_base_.copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=True) +] +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict(type='YOLOv5MultiModalMixUp', + prob=_base_.mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform, *text_transform +] + +_train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict(type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict(type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, + 1 + 
_base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] +train_pipeline_stage2 = [*_train_pipeline_stage2, *text_transform] +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco', + ann_file='lvis/lvis_v1_train_base.json', + data_prefix=dict(img=''), + filter_cfg=dict(filter_empty_gt=True, min_size=32)), + class_text_path='data/texts/lvis_v1_base_class_texts.json', + pipeline=train_pipeline) +train_dataloader = dict(persistent_workers=persistent_workers, + batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=coco_train_dataset) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] + +# training settings +default_hooks = dict(param_scheduler=dict(scheduler_type='linear', + lr_factor=0.01, + max_epochs=max_epochs), + checkpoint=dict(max_keep_ckpts=-1, + save_best=None, + interval=save_epoch_intervals)) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0), + 'neck': + dict(lr_mult=0.0), + 'head.head_module.reg_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_preds': + dict(lr_mult=0.0), + 'head.head_module.cls_contrasts': + dict(lr_mult=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') + +# evaluation settings +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_val.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + ann_file='data/coco/lvis/lvis_v1_val.json', + metric=['bbox', 'segm']) +test_evaluator = val_evaluator +find_unused_parameters = True diff --git a/models/YOLO-World/data/texts/coco_class_texts.json b/models/YOLO-World/data/texts/coco_class_texts.json new file mode 100644 index 0000000000000000000000000000000000000000..b83ee71a04c5d2606793ea9e271a8422ca762ed5 --- /dev/null +++ b/models/YOLO-World/data/texts/coco_class_texts.json @@ -0,0 +1 @@ +[["person"], ["bicycle"], ["car"], ["motorcycle"], ["airplane"], ["bus"], ["train"], ["truck"], ["boat"], ["traffic light"], ["fire hydrant"], ["stop sign"], ["parking meter"], ["bench"], ["bird"], ["cat"], ["dog"], ["horse"], ["sheep"], ["cow"], ["elephant"], ["bear"], ["zebra"], ["giraffe"], ["backpack"], ["umbrella"], ["handbag"], ["tie"], ["suitcase"], ["frisbee"], ["skis"], 
["snowboard"], ["sports ball"], ["kite"], ["baseball bat"], ["baseball glove"], ["skateboard"], ["surfboard"], ["tennis racket"], ["bottle"], ["wine glass"], ["cup"], ["fork"], ["knife"], ["spoon"], ["bowl"], ["banana"], ["apple"], ["sandwich"], ["orange"], ["broccoli"], ["carrot"], ["hot dog"], ["pizza"], ["donut"], ["cake"], ["chair"], ["couch"], ["potted plant"], ["bed"], ["dining table"], ["toilet"], ["tv"], ["laptop"], ["mouse"], ["remote"], ["keyboard"], ["cell phone"], ["microwave"], ["oven"], ["toaster"], ["sink"], ["refrigerator"], ["book"], ["clock"], ["vase"], ["scissors"], ["teddy bear"], ["hair drier"], ["toothbrush"]] diff --git a/models/YOLO-World/data/texts/lvis_v1_base_class_captions.json b/models/YOLO-World/data/texts/lvis_v1_base_class_captions.json new file mode 100644 index 0000000000000000000000000000000000000000..27e5e72636076fccdbbe7a93ffb56d2d8bbe0a3f --- /dev/null +++ b/models/YOLO-World/data/texts/lvis_v1_base_class_captions.json @@ -0,0 +1 @@ +[["aerosol can", "spray can"], ["air conditioner"], ["airplane", "aeroplane"], ["alarm clock"], ["alcohol", "alcoholic beverage"], ["alligator", "gator"], ["almond"], ["ambulance"], ["amplifier"], ["anklet", "ankle bracelet"], ["antenna", "aerial", "transmitting aerial"], ["apple"], ["apron"], ["aquarium", "fish tank"], ["armband"], ["armchair"], ["artichoke"], ["trash can", "garbage can", "wastebin", "dustbin", "trash barrel", "trash bin"], ["ashtray"], ["asparagus"], ["atomizer", "atomiser", "spray", "sprayer", "nebulizer", "nebuliser"], ["avocado"], ["award", "accolade"], ["awning"], ["baby buggy", "baby carriage", "perambulator", "pram", "stroller"], ["basketball backboard"], ["backpack", "knapsack", "packsack", "rucksack", "haversack"], ["handbag", "purse", "pocketbook"], ["suitcase", "baggage", "luggage"], ["bagel", "beigel"], ["ball"], ["balloon"], ["bamboo"], ["banana"], ["Band Aid"], ["bandage"], ["bandanna", "bandana"], ["banner", "streamer"], ["barrel", "cask"], ["barrette"], ["barrow", "garden cart", "lawn cart", "wheelbarrow"], ["baseball base"], ["baseball"], ["baseball bat"], ["baseball cap", "jockey cap", "golf cap"], ["baseball glove", "baseball mitt"], ["basket", "handbasket"], ["basketball"], ["bat", "bat animal"], ["bath mat"], ["bath towel"], ["bathrobe"], ["bathtub", "bathing tub"], ["battery"], ["bead"], ["bean curd", "tofu"], ["beanbag"], ["beanie", "beany"], ["bear"], ["bed"], ["bedspread", "bedcover", "bed covering", "counterpane", "spread"], ["cow"], ["beef", "beef food", "boeuf", "boeuf food"], ["beer bottle"], ["beer can"], ["bell"], ["bell pepper", "capsicum"], ["belt"], ["belt buckle"], ["bench"], ["beret"], ["bib"], ["bicycle", "bike", "bike bicycle"], ["visor", "vizor"], ["billboard"], ["binder", "ring-binder"], ["binoculars", "field glasses", "opera glasses"], ["bird"], ["birdfeeder"], ["birdbath"], ["birdcage"], ["birdhouse"], ["birthday cake"], ["black sheep"], ["blackberry"], ["blackboard", "chalkboard"], ["blanket"], ["blazer", "sport jacket", "sport coat", "sports jacket", "sports coat"], ["blender", "liquidizer", "liquidiser"], ["blinker", "flasher"], ["blouse"], ["blueberry"], ["boat", "ship", "ship boat"], ["bobbin", "spool", "reel"], ["bobby pin", "hairgrip"], ["boiled egg", "coddled egg"], ["deadbolt"], ["bolt"], ["book"], ["bookcase"], ["booklet", "brochure", "leaflet", "pamphlet"], ["boot"], ["bottle"], ["bottle opener"], ["bouquet"], ["bow", "bow decorative ribbons"], ["bow-tie", "bowtie"], ["bowl"], ["bowler hat", "bowler", "derby hat", "derby", "plug hat"], ["box"], 
["suspenders"], ["bracelet", "bangle"], ["brassiere", "bra", "bandeau"], ["bread-bin", "breadbox"], ["bread"], ["bridal gown", "wedding gown", "wedding dress"], ["briefcase"], ["broccoli"], ["broom"], ["brownie"], ["brussels sprouts"], ["bucket", "pail"], ["horned cow"], ["bulldog"], ["bullet train"], ["bulletin board", "notice board"], ["bullhorn", "megaphone"], ["bun", "roll"], ["bunk bed"], ["buoy"], ["bus", "bus vehicle", "autobus", "charabanc", "double-decker", "motorbus", "motorcoach"], ["business card"], ["butter"], ["butterfly"], ["button"], ["cab", "cab taxi", "taxi", "taxicab"], ["cabin car", "caboose"], ["cabinet"], ["cake"], ["calculator"], ["calendar"], ["calf"], ["camcorder"], ["camel"], ["camera"], ["camera lens"], ["camper", "camper vehicle", "camping bus", "motor home"], ["can", "tin can"], ["can opener", "tin opener"], ["candle", "candlestick"], ["candle holder"], ["candy cane"], ["walking cane"], ["canister", "cannister"], ["canoe"], ["cantaloup", "cantaloupe"], ["cap", "cap headwear"], ["bottle cap", "cap", "cap container lid"], ["cape"], ["cappuccino", "coffee cappuccino"], ["car", "car automobile", "auto", "auto automobile", "automobile"], ["railcar", "railcar part of a train", "railway car", "railway car part of a train", "railroad car", "railroad car part of a train"], ["identity card"], ["card"], ["cardigan"], ["horse carriage"], ["carrot"], ["tote bag"], ["cart"], ["carton"], ["cash register", "register", "register for cash transactions"], ["cast", "plaster cast", "plaster bandage"], ["cat"], ["cauliflower"], ["cayenne", "cayenne spice", "cayenne pepper", "cayenne pepper spice", "red pepper", "red pepper spice"], ["CD player"], ["celery"], ["cellular telephone", "cellular phone", "cellphone", "mobile phone", "smart phone"], ["chair"], ["chandelier"], ["cherry"], ["chicken", "chicken animal"], ["chickpea", "garbanzo"], ["chili", "chili vegetable", "chili pepper", "chili pepper vegetable", "chilli", "chilli vegetable", "chilly", "chilly vegetable", "chile", "chile vegetable"], ["crisp", "crisp potato chip", "potato chip"], ["chocolate bar"], ["chocolate cake"], ["choker", "collar", "neckband"], ["chopping board", "cutting board", "chopping block"], ["chopstick"], ["Christmas tree"], ["slide"], ["cigarette"], ["cigarette case", "cigarette pack"], ["cistern", "water tank"], ["clasp"], ["cleansing agent", "cleanser", "cleaner"], ["clip"], ["clipboard"], ["clock", "timepiece", "timekeeper"], ["clock tower"], ["clothes hamper", "laundry basket", "clothes basket"], ["clothespin", "clothes peg"], ["coaster"], ["coat"], ["coat hanger", "clothes hanger", "dress hanger"], ["coatrack", "hatrack"], ["cock", "rooster"], ["coconut", "cocoanut"], ["coffee maker", "coffee machine"], ["coffee table", "cocktail table"], ["coffeepot"], ["coin"], ["colander", "cullender"], ["coleslaw", "slaw"], ["pacifier", "teething ring"], ["computer keyboard", "keyboard", "keyboard computer"], ["condiment"], ["cone", "traffic cone"], ["control", "controller"], ["cookie", "cooky", "biscuit", "biscuit cookie"], ["cooler", "cooler for food", "ice chest"], ["cork", "cork bottle plug", "bottle cork"], ["corkscrew", "bottle screw"], ["edible corn", "corn", "maize"], ["cornet", "horn", "trumpet"], ["cornice", "valance", "valance board", "pelmet"], ["corset", "girdle"], ["costume"], ["cowbell"], ["cowboy hat", "ten-gallon hat"], ["crab", "crab animal"], ["cracker"], ["crate"], ["crayon", "wax crayon"], ["crescent roll", "croissant"], ["crib", "cot"], ["crock pot", "earthenware jar"], ["crossbar"], ["crow"], 
["crown"], ["crucifix"], ["cruise ship", "cruise liner"], ["police cruiser", "patrol car", "police car", "squad car"], ["crumb"], ["crutch"], ["cub", "cub animal"], ["cube", "square block"], ["cucumber", "cuke"], ["cufflink"], ["cup"], ["trophy cup"], ["cupboard", "closet"], ["cupcake"], ["curtain", "drapery"], ["cushion"], ["dartboard"], ["deck chair", "beach chair"], ["deer", "cervid"], ["dental floss", "floss"], ["desk"], ["diaper"], ["dining table"], ["dish"], ["dish antenna"], ["dishrag", "dishcloth"], ["dishtowel", "tea towel"], ["dishwasher", "dishwashing machine"], ["dispenser"], ["Dixie cup", "paper cup"], ["dog"], ["dog collar"], ["doll"], ["dolphin"], ["domestic ass", "donkey"], ["doorknob", "doorhandle"], ["doormat", "welcome mat"], ["doughnut", "donut"], ["drawer"], ["underdrawers", "boxers", "boxershorts"], ["dress", "frock"], ["dress hat", "high hat", "opera hat", "silk hat", "top hat"], ["dress suit"], ["dresser"], ["drill"], ["drum", "drum musical instrument"], ["duck"], ["duckling"], ["duct tape"], ["duffel bag", "duffle bag", "duffel", "duffle"], ["dumpster"], ["eagle"], ["earphone", "earpiece", "headphone"], ["earring"], ["easel"], ["egg", "eggs"], ["egg yolk", "yolk", "yolk egg"], ["eggbeater", "eggwhisk"], ["eggplant", "aubergine"], ["refrigerator"], ["elephant"], ["elk", "moose"], ["envelope"], ["eraser"], ["fan"], ["faucet", "spigot", "tap"], ["Ferris wheel"], ["ferry", "ferryboat"], ["fighter jet", "fighter aircraft", "attack aircraft"], ["figurine"], ["file cabinet", "filing cabinet"], ["fire alarm", "smoke alarm"], ["fire engine", "fire truck"], ["fire extinguisher", "extinguisher"], ["fire hose"], ["fireplace"], ["fireplug", "fire hydrant", "hydrant"], ["fish"], ["fish", "fish food"], ["fishing rod", "fishing pole"], ["flag"], ["flagpole", "flagstaff"], ["flamingo"], ["flannel"], ["flap"], ["flashlight", "torch"], ["flip-flop", "flip-flop sandal"], ["flipper", "flipper footwear", "fin", "fin footwear"], ["flower arrangement", "floral arrangement"], ["flute glass", "champagne flute"], ["foal"], ["folding chair"], ["food processor"], ["football", "football American"], ["footstool", "footrest"], ["fork"], ["forklift"], ["freight car"], ["French toast"], ["freshener", "air freshener"], ["frisbee"], ["frog", "toad", "toad frog"], ["fruit juice"], ["frying pan", "frypan", "skillet"], ["garbage truck"], ["garden hose"], ["gargle", "mouthwash"], ["garlic", "ail"], ["gazelle"], ["gelatin", "jelly"], ["giant panda", "panda", "panda bear"], ["gift wrap"], ["ginger", "gingerroot"], ["giraffe"], ["cincture", "sash", "waistband", "waistcloth"], ["glass", "glass drink container", "drinking glass"], ["globe"], ["glove"], ["goat"], ["goggles"], ["golf club", "golf-club"], ["golfcart"], ["goose"], ["grape"], ["grater"], ["gravestone", "headstone", "tombstone"], ["green bean"], ["green onion", "spring onion", "scallion"], ["grill", "grille", "grillwork", "radiator grille"], ["grizzly", "grizzly bear"], ["grocery bag"], ["guitar"], ["gull", "seagull"], ["gun"], ["hairbrush"], ["hairnet"], ["hairpin"], ["ham", "jambon", "gammon"], ["hamburger", "beefburger", "burger"], ["hammer"], ["hammock"], ["hamster"], ["hair dryer"], ["hand towel", "face towel"], ["handcart", "pushcart", "hand truck"], ["handkerchief"], ["handle", "grip", "handgrip"], ["hat"], ["veil"], ["headband"], ["headboard"], ["headlight", "headlamp"], ["headscarf"], ["headstall", "headstall for horses", "headpiece", "headpiece for horses"], ["heart"], ["heater", "warmer"], ["helicopter"], ["helmet"], ["highchair", 
"feeding chair"], ["hinge"], ["hog", "pig"], ["home plate", "home plate baseball", "home base", "home base baseball"], ["honey"], ["fume hood", "exhaust hood"], ["hook"], ["horse"], ["hose", "hosepipe"], ["hot sauce"], ["hummingbird"], ["polar bear"], ["icecream"], ["ice maker"], ["igniter", "ignitor", "lighter"], ["iPod"], ["iron", "iron for clothing", "smoothing iron", "smoothing iron for clothing"], ["ironing board"], ["jacket"], ["jam"], ["jar"], ["jean", "blue jean", "denim"], ["jeep", "landrover"], ["jersey", "T-shirt", "tee shirt"], ["jet plane", "jet-propelled plane"], ["jewelry", "jewellery"], ["jumpsuit"], ["kayak"], ["kettle", "boiler"], ["key"], ["kilt"], ["kimono"], ["kitchen sink"], ["kite"], ["kitten", "kitty"], ["kiwi fruit"], ["knee pad"], ["knife"], ["knob"], ["ladder"], ["ladle"], ["ladybug", "ladybeetle", "ladybird beetle"], ["lamb", "lamb animal"], ["lamp"], ["lamppost"], ["lampshade"], ["lantern"], ["lanyard", "laniard"], ["laptop computer", "notebook computer"], ["latch"], ["legging", "legging clothing", "leging", "leging clothing", "leg covering"], ["Lego", "Lego set"], ["lemon"], ["lettuce"], ["license plate", "numberplate"], ["life buoy", "lifesaver", "life belt", "life ring"], ["life jacket", "life vest"], ["lightbulb"], ["lime"], ["lion"], ["lip balm"], ["lizard"], ["log"], ["lollipop"], ["speaker", "speaker stereo equipment"], ["loveseat"], ["magazine"], ["magnet"], ["mail slot"], ["mailbox", "mailbox at home", "letter box", "letter box at home"], ["mandarin orange"], ["manger", "trough"], ["manhole"], ["map"], ["marker"], ["mashed potato"], ["mask", "facemask"], ["mast"], ["mat", "mat gym equipment", "gym mat"], ["mattress"], ["measuring cup"], ["measuring stick", "ruler", "ruler measuring stick", "measuring rod"], ["meatball"], ["medicine"], ["melon"], ["microphone"], ["microwave oven"], ["milk"], ["minivan"], ["mirror"], ["mitten"], ["mixer", "mixer kitchen tool", "stand mixer"], ["money"], ["monitor", "monitor computer equipment"], ["monkey"], ["motor"], ["motor scooter", "scooter"], ["motorcycle"], ["mound", "mound baseball", "pitcher's mound"], ["mouse", "mouse computer equipment", "computer mouse"], ["mousepad"], ["muffin"], ["mug"], ["mushroom"], ["musical instrument", "instrument", "instrument musical"], ["napkin", "table napkin", "serviette"], ["necklace"], ["necktie", "tie", "tie necktie"], ["needle"], ["nest"], ["newspaper", "paper", "paper newspaper"], ["newsstand"], ["nightshirt", "nightwear", "sleepwear", "nightclothes"], ["noseband", "noseband for animals", "nosepiece", "nosepiece for animals"], ["notebook"], ["notepad"], ["nut"], ["oar"], ["oil lamp", "kerosene lamp", "kerosine lamp"], ["olive oil"], ["onion"], ["orange", "orange fruit"], ["orange juice"], ["ostrich"], ["ottoman", "pouf", "pouffe", "hassock"], ["oven"], ["overalls", "overalls clothing"], ["owl"], ["packet"], ["pad"], ["paddle", "boat paddle"], ["padlock"], ["paintbrush"], ["painting"], ["pajamas", "pyjamas"], ["palette", "pallet"], ["pan", "pan for cooking", "cooking pan"], ["pancake"], ["paper plate"], ["paper towel"], ["parachute"], ["parakeet", "parrakeet", "parroket", "paraquet", "paroquet", "parroquet"], ["parasail", "parasail sports"], ["parasol", "sunshade"], ["parka", "anorak"], ["parking meter"], ["parrot"], ["passenger car", "passenger car part of a train", "coach", "coach part of a train"], ["passport"], ["pastry"], ["pea", "pea food"], ["peach"], ["peanut butter"], ["pear"], ["peeler", "peeler tool for fruit and vegetables"], ["pelican"], ["pen"], ["pencil"], 
["penguin"], ["pepper", "peppercorn"], ["pepper mill", "pepper grinder"], ["perfume"], ["person", "baby", "child", "boy", "girl", "man", "woman", "human"], ["pet"], ["pew", "pew church bench", "church bench"], ["phonograph record", "phonograph recording", "record", "record phonograph recording"], ["piano"], ["pickle"], ["pickup truck"], ["pie"], ["pigeon"], ["pillow"], ["pineapple"], ["pinecone"], ["pipe", "piping"], ["pita", "pita bread", "pocket bread"], ["pitcher", "pitcher vessel for liquid", "ewer"], ["pizza"], ["place mat"], ["plate"], ["platter"], ["pliers", "plyers"], ["pocketknife"], ["poker", "poker fire stirring tool", "stove poker", "fire hook"], ["pole", "post"], ["polo shirt", "sport shirt"], ["pony"], ["pop", "pop soda", "soda", "soda pop", "tonic", "soft drink"], ["postbox", "postbox public", "mailbox", "mailbox public"], ["postcard", "postal card", "mailing-card"], ["poster", "placard"], ["pot"], ["flowerpot"], ["potato"], ["potholder"], ["pottery", "clayware"], ["pouch"], ["power shovel", "excavator", "digger"], ["prawn", "shrimp"], ["pretzel"], ["printer", "printing machine"], ["projectile", "projectile weapon", "missile"], ["projector"], ["propeller", "propellor"], ["pumpkin"], ["puppy"], ["quilt", "comforter"], ["rabbit"], ["racket", "racquet"], ["radiator"], ["radio receiver", "radio set", "radio", "tuner", "tuner radio"], ["radish", "daikon"], ["raft"], ["raincoat", "waterproof jacket"], ["ram", "ram animal"], ["raspberry"], ["razorblade"], ["reamer", "reamer juicer", "juicer", "juice reamer"], ["rearview mirror"], ["receipt"], ["recliner", "reclining chair", "lounger", "lounger chair"], ["record player", "phonograph", "phonograph record player", "turntable"], ["reflector"], ["remote control"], ["rhinoceros"], ["rifle"], ["ring"], ["robe"], ["rocking chair"], ["rolling pin"], ["router", "router computer equipment"], ["rubber band", "elastic band"], ["runner", "runner carpet"], ["plastic bag", "paper bag"], ["saddle", "saddle on an animal"], ["saddle blanket", "saddlecloth", "horse blanket"], ["saddlebag"], ["sail"], ["salad"], ["salami"], ["salmon", "salmon fish"], ["salsa"], ["saltshaker"], ["sandal", "sandal type of shoe"], ["sandwich"], ["saucer"], ["sausage"], ["scale", "scale measuring instrument"], ["scarf"], ["school bus"], ["scissors"], ["scoreboard"], ["screwdriver"], ["scrubbing brush"], ["sculpture"], ["seabird", "seafowl"], ["seahorse"], ["seashell"], ["sewing machine"], ["shaker"], ["shampoo"], ["shark"], ["shaving cream", "shaving soap"], ["sheep"], ["shield"], ["shirt"], ["shoe", "sneaker", "sneaker type of shoe", "tennis shoe"], ["shopping bag"], ["shopping cart"], ["short pants", "shorts", "shorts clothing", "trunks", "trunks clothing"], ["shoulder bag"], ["shovel"], ["shower head"], ["shower curtain"], ["signboard"], ["silo"], ["sink"], ["skateboard"], ["skewer"], ["ski"], ["ski boot"], ["ski parka", "ski jacket"], ["ski pole"], ["skirt"], ["sled", "sledge", "sleigh"], ["sleeping bag"], ["slipper", "slipper footwear", "carpet slipper", "carpet slipper footwear"], ["snowboard"], ["snowman"], ["snowmobile"], ["soap"], ["soccer ball"], ["sock"], ["sofa", "couch", "lounge"], ["solar array", "solar battery", "solar panel"], ["soup"], ["soupspoon"], ["sour cream", "soured cream"], ["spatula"], ["spectacles", "specs", "eyeglasses", "glasses"], ["spice rack"], ["spider"], ["sponge"], ["spoon"], ["sportswear", "athletic wear", "activewear"], ["spotlight"], ["squirrel"], ["stapler", "stapler stapling machine"], ["starfish", "sea star"], ["statue", "statue 
sculpture"], ["steak", "steak food"], ["steering wheel"], ["step stool"], ["stereo", "stereo sound system"], ["stirrup"], ["stool"], ["stop sign"], ["brake light"], ["stove", "kitchen stove", "range", "range kitchen appliance", "kitchen range", "cooking stove"], ["strainer"], ["strap"], ["straw", "straw for drinking", "drinking straw"], ["strawberry"], ["street sign"], ["streetlight", "street lamp"], ["suit", "suit clothing"], ["sunflower"], ["sunglasses"], ["sunhat"], ["surfboard"], ["sushi"], ["mop"], ["sweat pants"], ["sweatband"], ["sweater"], ["sweatshirt"], ["sweet potato"], ["swimsuit", "swimwear", "bathing suit", "swimming costume", "bathing costume", "swimming trunks", "bathing trunks"], ["sword"], ["table"], ["table lamp"], ["tablecloth"], ["tag"], ["taillight", "rear light"], ["tank", "tank storage vessel", "storage tank"], ["tank top", "tank top clothing"], ["tape", "tape sticky cloth or paper"], ["tape measure", "measuring tape"], ["tapestry"], ["tarp"], ["tartan", "plaid"], ["tassel"], ["tea bag"], ["teacup"], ["teakettle"], ["teapot"], ["teddy bear"], ["telephone", "phone", "telephone set"], ["telephone booth", "phone booth", "call box", "telephone box", "telephone kiosk"], ["telephone pole", "telegraph pole", "telegraph post"], ["television camera", "tv camera"], ["television set", "tv", "tv set"], ["tennis ball"], ["tennis racket"], ["thermometer"], ["thermos bottle"], ["thermostat"], ["thread", "yarn"], ["thumbtack", "drawing pin", "pushpin"], ["tiara"], ["tiger"], ["tights", "tights clothing", "leotards"], ["timer", "stopwatch"], ["tinfoil"], ["tinsel"], ["tissue paper"], ["toast", "toast food"], ["toaster"], ["toaster oven"], ["toilet"], ["toilet tissue", "toilet paper", "bathroom tissue"], ["tomato"], ["tongs"], ["toolbox"], ["toothbrush"], ["toothpaste"], ["toothpick"], ["cover"], ["tortilla"], ["tow truck"], ["towel"], ["towel rack", "towel rail", "towel bar"], ["toy"], ["tractor", "tractor farm equipment"], ["traffic light"], ["dirt bike"], ["trailer truck", "tractor trailer", "trucking rig", "articulated lorry", "semi truck"], ["train", "train railroad vehicle", "railroad train"], ["tray"], ["tricycle"], ["tripod"], ["trousers", "pants", "pants clothing"], ["truck"], ["trunk"], ["turban"], ["turkey", "turkey food"], ["turtle"], ["turtleneck", "turtleneck clothing", "polo-neck"], ["typewriter"], ["umbrella"], ["underwear", "underclothes", "underclothing", "underpants"], ["urinal"], ["urn"], ["vacuum cleaner"], ["vase"], ["vending machine"], ["vent", "blowhole", "air vent"], ["vest", "waistcoat"], ["videotape"], ["volleyball"], ["waffle"], ["wagon"], ["wagon wheel"], ["walking stick"], ["wall clock"], ["wall socket", "wall plug", "electric outlet", "electrical outlet", "outlet", "electric receptacle"], ["wallet", "billfold"], ["automatic washer", "washing machine"], ["watch", "wristwatch"], ["water bottle"], ["water cooler"], ["water faucet", "water tap", "tap", "tap water faucet"], ["water jug"], ["water scooter", "sea scooter", "jet ski"], ["water ski"], ["water tower"], ["watering can"], ["watermelon"], ["weathervane", "vane", "vane weathervane", "wind vane"], ["webcam"], ["wedding cake", "bridecake"], ["wedding ring", "wedding band"], ["wet suit"], ["wheel"], ["wheelchair"], ["whipped cream"], ["whistle"], ["wig"], ["wind chime"], ["windmill"], ["window box", "window box for plants"], ["windshield wiper", "windscreen wiper", "wiper", "wiper for windshield or screen"], ["windsock", "air sock", "air-sleeve", "wind sleeve", "wind cone"], ["wine bottle"], ["wine 
bucket", "wine cooler"], ["wineglass"], ["blinder", "blinder for horses"], ["wok"], ["wooden spoon"], ["wreath"], ["wrench", "spanner"], ["wristband"], ["wristlet", "wrist band"], ["yacht"], ["yogurt", "yoghurt", "yoghourt"], ["yoke", "yoke animal equipment"], ["zebra"], ["zucchini", "courgette"]] \ No newline at end of file diff --git a/models/YOLO-World/data/texts/lvis_v1_class_texts.json b/models/YOLO-World/data/texts/lvis_v1_class_texts.json new file mode 100644 index 0000000000000000000000000000000000000000..367aaf5430da14c914503b46e4a91bd1542849dd --- /dev/null +++ b/models/YOLO-World/data/texts/lvis_v1_class_texts.json @@ -0,0 +1 @@ +[["aerosol can", "spray can"], ["air conditioner"], ["airplane", "aeroplane"], ["alarm clock"], ["alcohol", "alcoholic beverage"], ["alligator", "gator"], ["almond"], ["ambulance"], ["amplifier"], ["anklet", "ankle bracelet"], ["antenna", "aerial", "transmitting aerial"], ["apple"], ["applesauce"], ["apricot"], ["apron"], ["aquarium", "fish tank"], ["arctic", "arctic type of shoe", "galosh", "golosh", "rubber", "rubber type of shoe", "gumshoe"], ["armband"], ["armchair"], ["armoire"], ["armor", "armour"], ["artichoke"], ["trash can", "garbage can", "wastebin", "dustbin", "trash barrel", "trash bin"], ["ashtray"], ["asparagus"], ["atomizer", "atomiser", "spray", "sprayer", "nebulizer", "nebuliser"], ["avocado"], ["award", "accolade"], ["awning"], ["ax", "axe"], ["baboon"], ["baby buggy", "baby carriage", "perambulator", "pram", "stroller"], ["basketball backboard"], ["backpack", "knapsack", "packsack", "rucksack", "haversack"], ["handbag", "purse", "pocketbook"], ["suitcase", "baggage", "luggage"], ["bagel", "beigel"], ["bagpipe"], ["baguet", "baguette"], ["bait", "lure"], ["ball"], ["ballet skirt", "tutu"], ["balloon"], ["bamboo"], ["banana"], ["Band Aid"], ["bandage"], ["bandanna", "bandana"], ["banjo"], ["banner", "streamer"], ["barbell"], ["barge"], ["barrel", "cask"], ["barrette"], ["barrow", "garden cart", "lawn cart", "wheelbarrow"], ["baseball base"], ["baseball"], ["baseball bat"], ["baseball cap", "jockey cap", "golf cap"], ["baseball glove", "baseball mitt"], ["basket", "handbasket"], ["basketball"], ["bass horn", "sousaphone", "tuba"], ["bat", "bat animal"], ["bath mat"], ["bath towel"], ["bathrobe"], ["bathtub", "bathing tub"], ["batter", "batter food"], ["battery"], ["beachball"], ["bead"], ["bean curd", "tofu"], ["beanbag"], ["beanie", "beany"], ["bear"], ["bed"], ["bedpan"], ["bedspread", "bedcover", "bed covering", "counterpane", "spread"], ["cow"], ["beef", "beef food", "boeuf", "boeuf food"], ["beeper", "pager"], ["beer bottle"], ["beer can"], ["beetle"], ["bell"], ["bell pepper", "capsicum"], ["belt"], ["belt buckle"], ["bench"], ["beret"], ["bib"], ["Bible"], ["bicycle", "bike", "bike bicycle"], ["visor", "vizor"], ["billboard"], ["binder", "ring-binder"], ["binoculars", "field glasses", "opera glasses"], ["bird"], ["birdfeeder"], ["birdbath"], ["birdcage"], ["birdhouse"], ["birthday cake"], ["birthday card"], ["pirate flag"], ["black sheep"], ["blackberry"], ["blackboard", "chalkboard"], ["blanket"], ["blazer", "sport jacket", "sport coat", "sports jacket", "sports coat"], ["blender", "liquidizer", "liquidiser"], ["blimp"], ["blinker", "flasher"], ["blouse"], ["blueberry"], ["gameboard"], ["boat", "ship", "ship boat"], ["bob", "bobber", "bobfloat"], ["bobbin", "spool", "reel"], ["bobby pin", "hairgrip"], ["boiled egg", "coddled egg"], ["bolo tie", "bolo", "bola tie", "bola"], ["deadbolt"], ["bolt"], ["bonnet"], ["book"], 
["bookcase"], ["booklet", "brochure", "leaflet", "pamphlet"], ["bookmark", "bookmarker"], ["boom microphone", "microphone boom"], ["boot"], ["bottle"], ["bottle opener"], ["bouquet"], ["bow", "bow weapon"], ["bow", "bow decorative ribbons"], ["bow-tie", "bowtie"], ["bowl"], ["pipe bowl"], ["bowler hat", "bowler", "derby hat", "derby", "plug hat"], ["bowling ball"], ["box"], ["boxing glove"], ["suspenders"], ["bracelet", "bangle"], ["brass plaque"], ["brassiere", "bra", "bandeau"], ["bread-bin", "breadbox"], ["bread"], ["breechcloth", "breechclout", "loincloth"], ["bridal gown", "wedding gown", "wedding dress"], ["briefcase"], ["broccoli"], ["broach"], ["broom"], ["brownie"], ["brussels sprouts"], ["bubble gum"], ["bucket", "pail"], ["horse buggy"], ["horned cow"], ["bulldog"], ["bulldozer", "dozer"], ["bullet train"], ["bulletin board", "notice board"], ["bulletproof vest"], ["bullhorn", "megaphone"], ["bun", "roll"], ["bunk bed"], ["buoy"], ["burrito"], ["bus", "bus vehicle", "autobus", "charabanc", "double-decker", "motorbus", "motorcoach"], ["business card"], ["butter"], ["butterfly"], ["button"], ["cab", "cab taxi", "taxi", "taxicab"], ["cabana"], ["cabin car", "caboose"], ["cabinet"], ["locker", "storage locker"], ["cake"], ["calculator"], ["calendar"], ["calf"], ["camcorder"], ["camel"], ["camera"], ["camera lens"], ["camper", "camper vehicle", "camping bus", "motor home"], ["can", "tin can"], ["can opener", "tin opener"], ["candle", "candlestick"], ["candle holder"], ["candy bar"], ["candy cane"], ["walking cane"], ["canister", "cannister"], ["canoe"], ["cantaloup", "cantaloupe"], ["canteen"], ["cap", "cap headwear"], ["bottle cap", "cap", "cap container lid"], ["cape"], ["cappuccino", "coffee cappuccino"], ["car", "car automobile", "auto", "auto automobile", "automobile"], ["railcar", "railcar part of a train", "railway car", "railway car part of a train", "railroad car", "railroad car part of a train"], ["elevator car"], ["car battery", "automobile battery"], ["identity card"], ["card"], ["cardigan"], ["cargo ship", "cargo vessel"], ["carnation"], ["horse carriage"], ["carrot"], ["tote bag"], ["cart"], ["carton"], ["cash register", "register", "register for cash transactions"], ["casserole"], ["cassette"], ["cast", "plaster cast", "plaster bandage"], ["cat"], ["cauliflower"], ["cayenne", "cayenne spice", "cayenne pepper", "cayenne pepper spice", "red pepper", "red pepper spice"], ["CD player"], ["celery"], ["cellular telephone", "cellular phone", "cellphone", "mobile phone", "smart phone"], ["chain mail", "ring mail", "chain armor", "chain armour", "ring armor", "ring armour"], ["chair"], ["chaise longue", "chaise", "daybed"], ["chalice"], ["chandelier"], ["chap"], ["checkbook", "chequebook"], ["checkerboard"], ["cherry"], ["chessboard"], ["chicken", "chicken animal"], ["chickpea", "garbanzo"], ["chili", "chili vegetable", "chili pepper", "chili pepper vegetable", "chilli", "chilli vegetable", "chilly", "chilly vegetable", "chile", "chile vegetable"], ["chime", "gong"], ["chinaware"], ["crisp", "crisp potato chip", "potato chip"], ["poker chip"], ["chocolate bar"], ["chocolate cake"], ["chocolate milk"], ["chocolate mousse"], ["choker", "collar", "neckband"], ["chopping board", "cutting board", "chopping block"], ["chopstick"], ["Christmas tree"], ["slide"], ["cider", "cyder"], ["cigar box"], ["cigarette"], ["cigarette case", "cigarette pack"], ["cistern", "water tank"], ["clarinet"], ["clasp"], ["cleansing agent", "cleanser", "cleaner"], ["cleat", "cleat for securing rope"], 
["clementine"], ["clip"], ["clipboard"], ["clippers", "clippers for plants"], ["cloak"], ["clock", "timepiece", "timekeeper"], ["clock tower"], ["clothes hamper", "laundry basket", "clothes basket"], ["clothespin", "clothes peg"], ["clutch bag"], ["coaster"], ["coat"], ["coat hanger", "clothes hanger", "dress hanger"], ["coatrack", "hatrack"], ["cock", "rooster"], ["cockroach"], ["cocoa", "cocoa beverage", "hot chocolate", "hot chocolate beverage", "drinking chocolate"], ["coconut", "cocoanut"], ["coffee maker", "coffee machine"], ["coffee table", "cocktail table"], ["coffeepot"], ["coil"], ["coin"], ["colander", "cullender"], ["coleslaw", "slaw"], ["coloring material", "colouring material"], ["combination lock"], ["pacifier", "teething ring"], ["comic book"], ["compass"], ["computer keyboard", "keyboard", "keyboard computer"], ["condiment"], ["cone", "traffic cone"], ["control", "controller"], ["convertible", "convertible automobile"], ["sofa bed"], ["cooker"], ["cookie", "cooky", "biscuit", "biscuit cookie"], ["cooking utensil"], ["cooler", "cooler for food", "ice chest"], ["cork", "cork bottle plug", "bottle cork"], ["corkboard"], ["corkscrew", "bottle screw"], ["edible corn", "corn", "maize"], ["cornbread"], ["cornet", "horn", "trumpet"], ["cornice", "valance", "valance board", "pelmet"], ["cornmeal"], ["corset", "girdle"], ["costume"], ["cougar", "puma", "catamount", "mountain lion", "panther"], ["coverall"], ["cowbell"], ["cowboy hat", "ten-gallon hat"], ["crab", "crab animal"], ["crabmeat"], ["cracker"], ["crape", "crepe", "French pancake"], ["crate"], ["crayon", "wax crayon"], ["cream pitcher"], ["crescent roll", "croissant"], ["crib", "cot"], ["crock pot", "earthenware jar"], ["crossbar"], ["crouton"], ["crow"], ["crowbar", "wrecking bar", "pry bar"], ["crown"], ["crucifix"], ["cruise ship", "cruise liner"], ["police cruiser", "patrol car", "police car", "squad car"], ["crumb"], ["crutch"], ["cub", "cub animal"], ["cube", "square block"], ["cucumber", "cuke"], ["cufflink"], ["cup"], ["trophy cup"], ["cupboard", "closet"], ["cupcake"], ["hair curler", "hair roller", "hair crimper"], ["curling iron"], ["curtain", "drapery"], ["cushion"], ["cylinder"], ["cymbal"], ["dagger"], ["dalmatian"], ["dartboard"], ["date", "date fruit"], ["deck chair", "beach chair"], ["deer", "cervid"], ["dental floss", "floss"], ["desk"], ["detergent"], ["diaper"], ["diary", "journal"], ["die", "dice"], ["dinghy", "dory", "rowboat"], ["dining table"], ["tux", "tuxedo"], ["dish"], ["dish antenna"], ["dishrag", "dishcloth"], ["dishtowel", "tea towel"], ["dishwasher", "dishwashing machine"], ["dishwasher detergent", "dishwashing detergent", "dishwashing liquid", "dishsoap"], ["dispenser"], ["diving board"], ["Dixie cup", "paper cup"], ["dog"], ["dog collar"], ["doll"], ["dollar", "dollar bill", "one dollar bill"], ["dollhouse", "doll's house"], ["dolphin"], ["domestic ass", "donkey"], ["doorknob", "doorhandle"], ["doormat", "welcome mat"], ["doughnut", "donut"], ["dove"], ["dragonfly"], ["drawer"], ["underdrawers", "boxers", "boxershorts"], ["dress", "frock"], ["dress hat", "high hat", "opera hat", "silk hat", "top hat"], ["dress suit"], ["dresser"], ["drill"], ["drone"], ["dropper", "eye dropper"], ["drum", "drum musical instrument"], ["drumstick"], ["duck"], ["duckling"], ["duct tape"], ["duffel bag", "duffle bag", "duffel", "duffle"], ["dumbbell"], ["dumpster"], ["dustpan"], ["eagle"], ["earphone", "earpiece", "headphone"], ["earplug"], ["earring"], ["easel"], ["eclair"], ["eel"], ["egg", "eggs"], ["egg 
roll", "spring roll"], ["egg yolk", "yolk", "yolk egg"], ["eggbeater", "eggwhisk"], ["eggplant", "aubergine"], ["electric chair"], ["refrigerator"], ["elephant"], ["elk", "moose"], ["envelope"], ["eraser"], ["escargot"], ["eyepatch"], ["falcon"], ["fan"], ["faucet", "spigot", "tap"], ["fedora"], ["ferret"], ["Ferris wheel"], ["ferry", "ferryboat"], ["fig", "fig fruit"], ["fighter jet", "fighter aircraft", "attack aircraft"], ["figurine"], ["file cabinet", "filing cabinet"], ["file", "file tool"], ["fire alarm", "smoke alarm"], ["fire engine", "fire truck"], ["fire extinguisher", "extinguisher"], ["fire hose"], ["fireplace"], ["fireplug", "fire hydrant", "hydrant"], ["first-aid kit"], ["fish"], ["fish", "fish food"], ["fishbowl", "goldfish bowl"], ["fishing rod", "fishing pole"], ["flag"], ["flagpole", "flagstaff"], ["flamingo"], ["flannel"], ["flap"], ["flash", "flashbulb"], ["flashlight", "torch"], ["fleece"], ["flip-flop", "flip-flop sandal"], ["flipper", "flipper footwear", "fin", "fin footwear"], ["flower arrangement", "floral arrangement"], ["flute glass", "champagne flute"], ["foal"], ["folding chair"], ["food processor"], ["football", "football American"], ["football helmet"], ["footstool", "footrest"], ["fork"], ["forklift"], ["freight car"], ["French toast"], ["freshener", "air freshener"], ["frisbee"], ["frog", "toad", "toad frog"], ["fruit juice"], ["frying pan", "frypan", "skillet"], ["fudge"], ["funnel"], ["futon"], ["gag", "muzzle"], ["garbage"], ["garbage truck"], ["garden hose"], ["gargle", "mouthwash"], ["gargoyle"], ["garlic", "ail"], ["gasmask", "respirator", "gas helmet"], ["gazelle"], ["gelatin", "jelly"], ["gemstone"], ["generator"], ["giant panda", "panda", "panda bear"], ["gift wrap"], ["ginger", "gingerroot"], ["giraffe"], ["cincture", "sash", "waistband", "waistcloth"], ["glass", "glass drink container", "drinking glass"], ["globe"], ["glove"], ["goat"], ["goggles"], ["goldfish"], ["golf club", "golf-club"], ["golfcart"], ["gondola", "gondola boat"], ["goose"], ["gorilla"], ["gourd"], ["grape"], ["grater"], ["gravestone", "headstone", "tombstone"], ["gravy boat", "gravy holder"], ["green bean"], ["green onion", "spring onion", "scallion"], ["griddle"], ["grill", "grille", "grillwork", "radiator grille"], ["grits", "hominy grits"], ["grizzly", "grizzly bear"], ["grocery bag"], ["guitar"], ["gull", "seagull"], ["gun"], ["hairbrush"], ["hairnet"], ["hairpin"], ["halter top"], ["ham", "jambon", "gammon"], ["hamburger", "beefburger", "burger"], ["hammer"], ["hammock"], ["hamper"], ["hamster"], ["hair dryer"], ["hand glass", "hand mirror"], ["hand towel", "face towel"], ["handcart", "pushcart", "hand truck"], ["handcuff"], ["handkerchief"], ["handle", "grip", "handgrip"], ["handsaw", "carpenter's saw"], ["hardback book", "hardcover book"], ["harmonium", "organ", "organ musical instrument", "reed organ", "reed organ musical instrument"], ["hat"], ["hatbox"], ["veil"], ["headband"], ["headboard"], ["headlight", "headlamp"], ["headscarf"], ["headset"], ["headstall", "headstall for horses", "headpiece", "headpiece for horses"], ["heart"], ["heater", "warmer"], ["helicopter"], ["helmet"], ["heron"], ["highchair", "feeding chair"], ["hinge"], ["hippopotamus"], ["hockey stick"], ["hog", "pig"], ["home plate", "home plate baseball", "home base", "home base baseball"], ["honey"], ["fume hood", "exhaust hood"], ["hook"], ["hookah", "narghile", "nargileh", "sheesha", "shisha", "water pipe"], ["hornet"], ["horse"], ["hose", "hosepipe"], ["hot-air balloon"], ["hotplate"], ["hot 
sauce"], ["hourglass"], ["houseboat"], ["hummingbird"], ["hummus", "humus", "hommos", "hoummos", "humous"], ["polar bear"], ["icecream"], ["popsicle"], ["ice maker"], ["ice pack", "ice bag"], ["ice skate"], ["igniter", "ignitor", "lighter"], ["inhaler", "inhalator"], ["iPod"], ["iron", "iron for clothing", "smoothing iron", "smoothing iron for clothing"], ["ironing board"], ["jacket"], ["jam"], ["jar"], ["jean", "blue jean", "denim"], ["jeep", "landrover"], ["jelly bean", "jelly egg"], ["jersey", "T-shirt", "tee shirt"], ["jet plane", "jet-propelled plane"], ["jewel", "gem", "precious stone"], ["jewelry", "jewellery"], ["joystick"], ["jumpsuit"], ["kayak"], ["keg"], ["kennel", "doghouse"], ["kettle", "boiler"], ["key"], ["keycard"], ["kilt"], ["kimono"], ["kitchen sink"], ["kitchen table"], ["kite"], ["kitten", "kitty"], ["kiwi fruit"], ["knee pad"], ["knife"], ["knitting needle"], ["knob"], ["knocker", "knocker on a door", "doorknocker"], ["koala", "koala bear"], ["lab coat", "laboratory coat"], ["ladder"], ["ladle"], ["ladybug", "ladybeetle", "ladybird beetle"], ["lamb", "lamb animal"], ["lamb-chop", "lambchop"], ["lamp"], ["lamppost"], ["lampshade"], ["lantern"], ["lanyard", "laniard"], ["laptop computer", "notebook computer"], ["lasagna", "lasagne"], ["latch"], ["lawn mower"], ["leather"], ["legging", "legging clothing", "leging", "leging clothing", "leg covering"], ["Lego", "Lego set"], ["legume"], ["lemon"], ["lemonade"], ["lettuce"], ["license plate", "numberplate"], ["life buoy", "lifesaver", "life belt", "life ring"], ["life jacket", "life vest"], ["lightbulb"], ["lightning rod", "lightning conductor"], ["lime"], ["limousine"], ["lion"], ["lip balm"], ["liquor", "spirits", "hard liquor", "liqueur", "cordial"], ["lizard"], ["log"], ["lollipop"], ["speaker", "speaker stereo equipment"], ["loveseat"], ["machine gun"], ["magazine"], ["magnet"], ["mail slot"], ["mailbox", "mailbox at home", "letter box", "letter box at home"], ["mallard"], ["mallet"], ["mammoth"], ["manatee"], ["mandarin orange"], ["manger", "trough"], ["manhole"], ["map"], ["marker"], ["martini"], ["mascot"], ["mashed potato"], ["masher"], ["mask", "facemask"], ["mast"], ["mat", "mat gym equipment", "gym mat"], ["matchbox"], ["mattress"], ["measuring cup"], ["measuring stick", "ruler", "ruler measuring stick", "measuring rod"], ["meatball"], ["medicine"], ["melon"], ["microphone"], ["microscope"], ["microwave oven"], ["milestone", "milepost"], ["milk"], ["milk can"], ["milkshake"], ["minivan"], ["mint candy"], ["mirror"], ["mitten"], ["mixer", "mixer kitchen tool", "stand mixer"], ["money"], ["monitor", "monitor computer equipment"], ["monkey"], ["motor"], ["motor scooter", "scooter"], ["motor vehicle", "automotive vehicle"], ["motorcycle"], ["mound", "mound baseball", "pitcher's mound"], ["mouse", "mouse computer equipment", "computer mouse"], ["mousepad"], ["muffin"], ["mug"], ["mushroom"], ["music stool", "piano stool"], ["musical instrument", "instrument", "instrument musical"], ["nailfile"], ["napkin", "table napkin", "serviette"], ["neckerchief"], ["necklace"], ["necktie", "tie", "tie necktie"], ["needle"], ["nest"], ["newspaper", "paper", "paper newspaper"], ["newsstand"], ["nightshirt", "nightwear", "sleepwear", "nightclothes"], ["nosebag", "nosebag for animals", "feedbag"], ["noseband", "noseband for animals", "nosepiece", "nosepiece for animals"], ["notebook"], ["notepad"], ["nut"], ["nutcracker"], ["oar"], ["octopus", "octopus food"], ["octopus", "octopus animal"], ["oil lamp", "kerosene lamp", "kerosine 
lamp"], ["olive oil"], ["omelet", "omelette"], ["onion"], ["orange", "orange fruit"], ["orange juice"], ["ostrich"], ["ottoman", "pouf", "pouffe", "hassock"], ["oven"], ["overalls", "overalls clothing"], ["owl"], ["packet"], ["inkpad", "inking pad", "stamp pad"], ["pad"], ["paddle", "boat paddle"], ["padlock"], ["paintbrush"], ["painting"], ["pajamas", "pyjamas"], ["palette", "pallet"], ["pan", "pan for cooking", "cooking pan"], ["pan", "pan metal container"], ["pancake"], ["pantyhose"], ["papaya"], ["paper plate"], ["paper towel"], ["paperback book", "paper-back book", "softback book", "soft-cover book"], ["paperweight"], ["parachute"], ["parakeet", "parrakeet", "parroket", "paraquet", "paroquet", "parroquet"], ["parasail", "parasail sports"], ["parasol", "sunshade"], ["parchment"], ["parka", "anorak"], ["parking meter"], ["parrot"], ["passenger car", "passenger car part of a train", "coach", "coach part of a train"], ["passenger ship"], ["passport"], ["pastry"], ["patty", "patty food"], ["pea", "pea food"], ["peach"], ["peanut butter"], ["pear"], ["peeler", "peeler tool for fruit and vegetables"], ["wooden leg", "pegleg"], ["pegboard"], ["pelican"], ["pen"], ["pencil"], ["pencil box", "pencil case"], ["pencil sharpener"], ["pendulum"], ["penguin"], ["pennant"], ["penny", "penny coin"], ["pepper", "peppercorn"], ["pepper mill", "pepper grinder"], ["perfume"], ["persimmon"], ["person", "baby", "child", "boy", "girl", "man", "woman", "human"], ["pet"], ["pew", "pew church bench", "church bench"], ["phonebook", "telephone book", "telephone directory"], ["phonograph record", "phonograph recording", "record", "record phonograph recording"], ["piano"], ["pickle"], ["pickup truck"], ["pie"], ["pigeon"], ["piggy bank", "penny bank"], ["pillow"], ["pin", "pin non jewelry"], ["pineapple"], ["pinecone"], ["ping-pong ball"], ["pinwheel"], ["tobacco pipe"], ["pipe", "piping"], ["pistol", "handgun"], ["pita", "pita bread", "pocket bread"], ["pitcher", "pitcher vessel for liquid", "ewer"], ["pitchfork"], ["pizza"], ["place mat"], ["plate"], ["platter"], ["playpen"], ["pliers", "plyers"], ["plow", "plow farm equipment", "plough", "plough farm equipment"], ["plume"], ["pocket watch"], ["pocketknife"], ["poker", "poker fire stirring tool", "stove poker", "fire hook"], ["pole", "post"], ["polo shirt", "sport shirt"], ["poncho"], ["pony"], ["pool table", "billiard table", "snooker table"], ["pop", "pop soda", "soda", "soda pop", "tonic", "soft drink"], ["postbox", "postbox public", "mailbox", "mailbox public"], ["postcard", "postal card", "mailing-card"], ["poster", "placard"], ["pot"], ["flowerpot"], ["potato"], ["potholder"], ["pottery", "clayware"], ["pouch"], ["power shovel", "excavator", "digger"], ["prawn", "shrimp"], ["pretzel"], ["printer", "printing machine"], ["projectile", "projectile weapon", "missile"], ["projector"], ["propeller", "propellor"], ["prune"], ["pudding"], ["puffer", "puffer fish", "pufferfish", "blowfish", "globefish"], ["puffin"], ["pug-dog"], ["pumpkin"], ["puncher"], ["puppet", "marionette"], ["puppy"], ["quesadilla"], ["quiche"], ["quilt", "comforter"], ["rabbit"], ["race car", "racing car"], ["racket", "racquet"], ["radar"], ["radiator"], ["radio receiver", "radio set", "radio", "tuner", "tuner radio"], ["radish", "daikon"], ["raft"], ["rag doll"], ["raincoat", "waterproof jacket"], ["ram", "ram animal"], ["raspberry"], ["rat"], ["razorblade"], ["reamer", "reamer juicer", "juicer", "juice reamer"], ["rearview mirror"], ["receipt"], ["recliner", "reclining chair", "lounger", 
"lounger chair"], ["record player", "phonograph", "phonograph record player", "turntable"], ["reflector"], ["remote control"], ["rhinoceros"], ["rib", "rib food"], ["rifle"], ["ring"], ["river boat"], ["road map"], ["robe"], ["rocking chair"], ["rodent"], ["roller skate"], ["Rollerblade"], ["rolling pin"], ["root beer"], ["router", "router computer equipment"], ["rubber band", "elastic band"], ["runner", "runner carpet"], ["plastic bag", "paper bag"], ["saddle", "saddle on an animal"], ["saddle blanket", "saddlecloth", "horse blanket"], ["saddlebag"], ["safety pin"], ["sail"], ["salad"], ["salad plate", "salad bowl"], ["salami"], ["salmon", "salmon fish"], ["salmon", "salmon food"], ["salsa"], ["saltshaker"], ["sandal", "sandal type of shoe"], ["sandwich"], ["satchel"], ["saucepan"], ["saucer"], ["sausage"], ["sawhorse", "sawbuck"], ["saxophone"], ["scale", "scale measuring instrument"], ["scarecrow", "strawman"], ["scarf"], ["school bus"], ["scissors"], ["scoreboard"], ["scraper"], ["screwdriver"], ["scrubbing brush"], ["sculpture"], ["seabird", "seafowl"], ["seahorse"], ["seaplane", "hydroplane"], ["seashell"], ["sewing machine"], ["shaker"], ["shampoo"], ["shark"], ["sharpener"], ["Sharpie"], ["shaver", "shaver electric", "electric shaver", "electric razor"], ["shaving cream", "shaving soap"], ["shawl"], ["shears"], ["sheep"], ["shepherd dog", "sheepdog"], ["sherbert", "sherbet"], ["shield"], ["shirt"], ["shoe", "sneaker", "sneaker type of shoe", "tennis shoe"], ["shopping bag"], ["shopping cart"], ["short pants", "shorts", "shorts clothing", "trunks", "trunks clothing"], ["shot glass"], ["shoulder bag"], ["shovel"], ["shower head"], ["shower cap"], ["shower curtain"], ["shredder", "shredder for paper"], ["signboard"], ["silo"], ["sink"], ["skateboard"], ["skewer"], ["ski"], ["ski boot"], ["ski parka", "ski jacket"], ["ski pole"], ["skirt"], ["skullcap"], ["sled", "sledge", "sleigh"], ["sleeping bag"], ["sling", "sling bandage", "triangular bandage"], ["slipper", "slipper footwear", "carpet slipper", "carpet slipper footwear"], ["smoothie"], ["snake", "serpent"], ["snowboard"], ["snowman"], ["snowmobile"], ["soap"], ["soccer ball"], ["sock"], ["sofa", "couch", "lounge"], ["softball"], ["solar array", "solar battery", "solar panel"], ["sombrero"], ["soup"], ["soup bowl"], ["soupspoon"], ["sour cream", "soured cream"], ["soya milk", "soybean milk", "soymilk"], ["space shuttle"], ["sparkler", "sparkler fireworks"], ["spatula"], ["spear", "lance"], ["spectacles", "specs", "eyeglasses", "glasses"], ["spice rack"], ["spider"], ["crawfish", "crayfish"], ["sponge"], ["spoon"], ["sportswear", "athletic wear", "activewear"], ["spotlight"], ["squid", "squid food", "calamari", "calamary"], ["squirrel"], ["stagecoach"], ["stapler", "stapler stapling machine"], ["starfish", "sea star"], ["statue", "statue sculpture"], ["steak", "steak food"], ["steak knife"], ["steering wheel"], ["stepladder"], ["step stool"], ["stereo", "stereo sound system"], ["stew"], ["stirrer"], ["stirrup"], ["stool"], ["stop sign"], ["brake light"], ["stove", "kitchen stove", "range", "range kitchen appliance", "kitchen range", "cooking stove"], ["strainer"], ["strap"], ["straw", "straw for drinking", "drinking straw"], ["strawberry"], ["street sign"], ["streetlight", "street lamp"], ["string cheese"], ["stylus"], ["subwoofer"], ["sugar bowl"], ["sugarcane", "sugarcane plant"], ["suit", "suit clothing"], ["sunflower"], ["sunglasses"], ["sunhat"], ["surfboard"], ["sushi"], ["mop"], ["sweat pants"], ["sweatband"], ["sweater"], 
["sweatshirt"], ["sweet potato"], ["swimsuit", "swimwear", "bathing suit", "swimming costume", "bathing costume", "swimming trunks", "bathing trunks"], ["sword"], ["syringe"], ["Tabasco sauce"], ["table-tennis table", "ping-pong table"], ["table"], ["table lamp"], ["tablecloth"], ["tachometer"], ["taco"], ["tag"], ["taillight", "rear light"], ["tambourine"], ["army tank", "armored combat vehicle", "armoured combat vehicle"], ["tank", "tank storage vessel", "storage tank"], ["tank top", "tank top clothing"], ["tape", "tape sticky cloth or paper"], ["tape measure", "measuring tape"], ["tapestry"], ["tarp"], ["tartan", "plaid"], ["tassel"], ["tea bag"], ["teacup"], ["teakettle"], ["teapot"], ["teddy bear"], ["telephone", "phone", "telephone set"], ["telephone booth", "phone booth", "call box", "telephone box", "telephone kiosk"], ["telephone pole", "telegraph pole", "telegraph post"], ["telephoto lens", "zoom lens"], ["television camera", "tv camera"], ["television set", "tv", "tv set"], ["tennis ball"], ["tennis racket"], ["tequila"], ["thermometer"], ["thermos bottle"], ["thermostat"], ["thimble"], ["thread", "yarn"], ["thumbtack", "drawing pin", "pushpin"], ["tiara"], ["tiger"], ["tights", "tights clothing", "leotards"], ["timer", "stopwatch"], ["tinfoil"], ["tinsel"], ["tissue paper"], ["toast", "toast food"], ["toaster"], ["toaster oven"], ["toilet"], ["toilet tissue", "toilet paper", "bathroom tissue"], ["tomato"], ["tongs"], ["toolbox"], ["toothbrush"], ["toothpaste"], ["toothpick"], ["cover"], ["tortilla"], ["tow truck"], ["towel"], ["towel rack", "towel rail", "towel bar"], ["toy"], ["tractor", "tractor farm equipment"], ["traffic light"], ["dirt bike"], ["trailer truck", "tractor trailer", "trucking rig", "articulated lorry", "semi truck"], ["train", "train railroad vehicle", "railroad train"], ["trampoline"], ["tray"], ["trench coat"], ["triangle", "triangle musical instrument"], ["tricycle"], ["tripod"], ["trousers", "pants", "pants clothing"], ["truck"], ["truffle", "truffle chocolate", "chocolate truffle"], ["trunk"], ["vat"], ["turban"], ["turkey", "turkey food"], ["turnip"], ["turtle"], ["turtleneck", "turtleneck clothing", "polo-neck"], ["typewriter"], ["umbrella"], ["underwear", "underclothes", "underclothing", "underpants"], ["unicycle"], ["urinal"], ["urn"], ["vacuum cleaner"], ["vase"], ["vending machine"], ["vent", "blowhole", "air vent"], ["vest", "waistcoat"], ["videotape"], ["vinegar"], ["violin", "fiddle"], ["vodka"], ["volleyball"], ["vulture"], ["waffle"], ["waffle iron"], ["wagon"], ["wagon wheel"], ["walking stick"], ["wall clock"], ["wall socket", "wall plug", "electric outlet", "electrical outlet", "outlet", "electric receptacle"], ["wallet", "billfold"], ["walrus"], ["wardrobe"], ["washbasin", "basin", "basin for washing", "washbowl", "washstand", "handbasin"], ["automatic washer", "washing machine"], ["watch", "wristwatch"], ["water bottle"], ["water cooler"], ["water faucet", "water tap", "tap", "tap water faucet"], ["water heater", "hot-water heater"], ["water jug"], ["water gun", "squirt gun"], ["water scooter", "sea scooter", "jet ski"], ["water ski"], ["water tower"], ["watering can"], ["watermelon"], ["weathervane", "vane", "vane weathervane", "wind vane"], ["webcam"], ["wedding cake", "bridecake"], ["wedding ring", "wedding band"], ["wet suit"], ["wheel"], ["wheelchair"], ["whipped cream"], ["whistle"], ["wig"], ["wind chime"], ["windmill"], ["window box", "window box for plants"], ["windshield wiper", "windscreen wiper", "wiper", "wiper for windshield 
or screen"], ["windsock", "air sock", "air-sleeve", "wind sleeve", "wind cone"], ["wine bottle"], ["wine bucket", "wine cooler"], ["wineglass"], ["blinder", "blinder for horses"], ["wok"], ["wolf"], ["wooden spoon"], ["wreath"], ["wrench", "spanner"], ["wristband"], ["wristlet", "wrist band"], ["yacht"], ["yogurt", "yoghurt", "yoghourt"], ["yoke", "yoke animal equipment"], ["zebra"], ["zucchini", "courgette"]] \ No newline at end of file diff --git a/models/YOLO-World/data/texts/obj365v1_class_texts.json b/models/YOLO-World/data/texts/obj365v1_class_texts.json new file mode 100644 index 0000000000000000000000000000000000000000..bddc11c0b9721bb4b7addc9a557a2eed1c9fe0fc --- /dev/null +++ b/models/YOLO-World/data/texts/obj365v1_class_texts.json @@ -0,0 +1 @@ +[["person"], ["sneakers"], ["chair"], ["hat"], ["lamp"], ["bottle"], ["cabinet", "shelf"], ["cup"], ["car"], ["glasses"], ["picture", "frame"], ["desk"], ["handbag"], ["street lights"], ["book"], ["plate"], ["helmet"], ["leather shoes"], ["pillow"], ["glove"], ["potted plant"], ["bracelet"], ["flower"], ["tv"], ["storage box"], ["vase"], ["bench"], ["wine glass"], ["boots"], ["bowl"], ["dining table"], ["umbrella"], ["boat"], ["flag"], ["speaker"], ["trash bin", "can"], ["stool"], ["backpack"], ["couch"], ["belt"], ["carpet"], ["basket"], ["towel", "napkin"], ["slippers"], ["barrel", "bucket"], ["coffee table"], ["suv"], ["toy"], ["tie"], ["bed"], ["traffic light"], ["pen", "pencil"], ["microphone"], ["sandals"], ["canned"], ["necklace"], ["mirror"], ["faucet"], ["bicycle"], ["bread"], ["high heels"], ["ring"], ["van"], ["watch"], ["sink"], ["horse"], ["fish"], ["apple"], ["camera"], ["candle"], ["teddy bear"], ["cake"], ["motorcycle"], ["wild bird"], ["laptop"], ["knife"], ["traffic sign"], ["cell phone"], ["paddle"], ["truck"], ["cow"], ["power outlet"], ["clock"], ["drum"], ["fork"], ["bus"], ["hanger"], ["nightstand"], ["pot", "pan"], ["sheep"], ["guitar"], ["traffic cone"], ["tea pot"], ["keyboard"], ["tripod"], ["hockey"], ["fan"], ["dog"], ["spoon"], ["blackboard", "whiteboard"], ["balloon"], ["air conditioner"], ["cymbal"], ["mouse"], ["telephone"], ["pickup truck"], ["orange"], ["banana"], ["airplane"], ["luggage"], ["skis"], ["soccer"], ["trolley"], ["oven"], ["remote"], ["baseball glove"], ["paper towel"], ["refrigerator"], ["train"], ["tomato"], ["machinery vehicle"], ["tent"], ["shampoo", "shower gel"], ["head phone"], ["lantern"], ["donut"], ["cleaning products"], ["sailboat"], ["tangerine"], ["pizza"], ["kite"], ["computer box"], ["elephant"], ["toiletries"], ["gas stove"], ["broccoli"], ["toilet"], ["stroller"], ["shovel"], ["baseball bat"], ["microwave"], ["skateboard"], ["surfboard"], ["surveillance camera"], ["gun"], ["life saver"], ["cat"], ["lemon"], ["liquid soap"], ["zebra"], ["duck"], ["sports car"], ["giraffe"], ["pumpkin"], ["piano"], ["stop sign"], ["radiator"], ["converter"], ["tissue"], ["carrot"], ["washing machine"], ["vent"], ["cookies"], ["cutting", "chopping board"], ["tennis racket"], ["candy"], ["skating and skiing shoes"], ["scissors"], ["folder"], ["baseball"], ["strawberry"], ["bow tie"], ["pigeon"], ["pepper"], ["coffee machine"], ["bathtub"], ["snowboard"], ["suitcase"], ["grapes"], ["ladder"], ["pear"], ["american football"], ["basketball"], ["potato"], ["paint brush"], ["printer"], ["billiards"], ["fire hydrant"], ["goose"], ["projector"], ["sausage"], ["fire extinguisher"], ["extension cord"], ["facial mask"], ["tennis ball"], ["chopsticks"], ["electronic stove and gas stove"], ["pie"], 
["frisbee"], ["kettle"], ["hamburger"], ["golf club"], ["cucumber"], ["clutch"], ["blender"], ["tong"], ["slide"], ["hot dog"], ["toothbrush"], ["facial cleanser"], ["mango"], ["deer"], ["egg"], ["violin"], ["marker"], ["ship"], ["chicken"], ["onion"], ["ice cream"], ["tape"], ["wheelchair"], ["plum"], ["bar soap"], ["scale"], ["watermelon"], ["cabbage"], ["router", "modem"], ["golf ball"], ["pine apple"], ["crane"], ["fire truck"], ["peach"], ["cello"], ["notepaper"], ["tricycle"], ["toaster"], ["helicopter"], ["green beans"], ["brush"], ["carriage"], ["cigar"], ["earphone"], ["penguin"], ["hurdle"], ["swing"], ["radio"], ["cd"], ["parking meter"], ["swan"], ["garlic"], ["french fries"], ["horn"], ["avocado"], ["saxophone"], ["trumpet"], ["sandwich"], ["cue"], ["kiwi fruit"], ["bear"], ["fishing rod"], ["cherry"], ["tablet"], ["green vegetables"], ["nuts"], ["corn"], ["key"], ["screwdriver"], ["globe"], ["broom"], ["pliers"], ["volleyball"], ["hammer"], ["eggplant"], ["trophy"], ["dates"], ["board eraser"], ["rice"], ["tape measure", "ruler"], ["dumbbell"], ["hamimelon"], ["stapler"], ["camel"], ["lettuce"], ["goldfish"], ["meat balls"], ["medal"], ["toothpaste"], ["antelope"], ["shrimp"], ["rickshaw"], ["trombone"], ["pomegranate"], ["coconut"], ["jellyfish"], ["mushroom"], ["calculator"], ["treadmill"], ["butterfly"], ["egg tart"], ["cheese"], ["pig"], ["pomelo"], ["race car"], ["rice cooker"], ["tuba"], ["crosswalk sign"], ["papaya"], ["hair drier"], ["green onion"], ["chips"], ["dolphin"], ["sushi"], ["urinal"], ["donkey"], ["electric drill"], ["spring rolls"], ["tortoise", "turtle"], ["parrot"], ["flute"], ["measuring cup"], ["shark"], ["steak"], ["poker card"], ["binoculars"], ["llama"], ["radish"], ["noodles"], ["yak"], ["mop"], ["crab"], ["microscope"], ["barbell"], ["bread", "bun"], ["baozi"], ["lion"], ["red cabbage"], ["polar bear"], ["lighter"], ["seal"], ["mangosteen"], ["comb"], ["eraser"], ["pitaya"], ["scallop"], ["pencil case"], ["saw"], ["table tennis paddle"], ["okra"], ["starfish"], ["eagle"], ["monkey"], ["durian"], ["game board"], ["rabbit"], ["french horn"], ["ambulance"], ["asparagus"], ["hoverboard"], ["pasta"], ["target"], ["hotair balloon"], ["chainsaw"], ["lobster"], ["iron"], ["flashlight"]] \ No newline at end of file diff --git a/models/YOLO-World/demo/README.md b/models/YOLO-World/demo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c6f607c5044ecb85c52cc5254006382bb648a4b1 --- /dev/null +++ b/models/YOLO-World/demo/README.md @@ -0,0 +1,65 @@ +## YOLO-World Demo + +### Getting Started + +Setting `PYTHONPATH` as the path to `YOLO-World` and run: + +```bash +PYTHONPATH=/xxxx/YOLO-World python demo/yyyy_demo.py +# or directly +PYTHONPATH=./ python demo/yyyy_demo.py +``` + +#### Gradio Demo + +We provide the [Gradio](https://www.gradio.app/) demo for local devices: + +```bash +pip install gradio==4.16.0 +python demo/demo.py path/to/config path/to/weights +``` + +Additionaly, you can use a Dockerfile to build an image with gradio. As a prerequisite, make sure you have respective drivers installed alongside [nvidia-container-runtime](https://stackoverflow.com/questions/59691207/docker-build-with-nvidia-runtime). Replace MODEL_NAME and WEIGHT_NAME with the respective values or ommit this and use default values from the [Dockerfile](Dockerfile#3) + +```bash +docker build --build-arg="MODEL=MODEL_NAME" --build-arg="WEIGHT=WEIGHT_NAME" -t yolo_demo . 
+docker run --runtime nvidia -p 8080:8080 yolo_demo
+```
+
+#### Image Demo
+
+We provide a simple image demo for inference on images with visualization outputs.
+
+```bash
+python demo/image_demo.py path/to/config path/to/weights image/path/directory 'person,dog,cat' --topk 100 --threshold 0.005 --output-dir demo_outputs
+```
+
+**Notes:**
+* The `image` can be a directory or a single image.
+* The `texts` can be a string of categories (noun phrases) separated by commas. We also support a `txt` file in which each line contains a category (noun phrase).
+* The `topk` and `threshold` control the number of predictions and the confidence threshold.
+
+
+#### Video Demo
+
+The `video_demo` has similar hyper-parameters to `image_demo`.
+
+```bash
+python demo/video_demo.py path/to/config path/to/weights video_path 'person,dog' --out out_video_path
+```
+
+### FAQ
+
+> 1. `Failed to custom import!`
+```bash
+  File "simple_demo.py", line 37, in
+    cfg = Config.fromfile(config_file)
+  File "/data/miniconda3/envs/det/lib/python3.8/site-packages/mmengine/config/config.py", line 183, in fromfile
+    raise ImportError('Failed to custom import!') from e
+ImportError: Failed to custom import!
+```
+**Solution:**
+
+```bash
+PYTHONPATH=/xxxx/YOLO-World python demo/simple_demo.py
+```
\ No newline at end of file
diff --git a/models/YOLO-World/demo/gradio_demo.py b/models/YOLO-World/demo/gradio_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..739e97beaa8641885f25fa2a4d1bdcbbfc95c20e
--- /dev/null
+++ b/models/YOLO-World/demo/gradio_demo.py
@@ -0,0 +1,253 @@
+# Copyright (c) Tencent Inc. All rights reserved.
+import os
+import sys
+import argparse
+import os.path as osp
+from io import BytesIO
+from functools import partial
+
+import cv2
+import onnx
+import torch
+import onnxsim
+import numpy as np
+import gradio as gr
+from PIL import Image
+import supervision as sv
+from torchvision.ops import nms
+from mmengine.runner import Runner
+from mmengine.dataset import Compose
+from mmengine.runner.amp import autocast
+from mmengine.config import Config, DictAction, ConfigDict
+from mmdet.datasets import CocoDataset
+from mmyolo.registry import RUNNERS
+
+sys.path.append('./deploy')
+from easydeploy import model as EM
+
+BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator(thickness=1)
+MASK_ANNOTATOR = sv.MaskAnnotator()
+
+
+class LabelAnnotator(sv.LabelAnnotator):
+
+    @staticmethod
+    def resolve_text_background_xyxy(
+        center_coordinates,
+        text_wh,
+        position,
+    ):
+        center_x, center_y = center_coordinates
+        text_w, text_h = text_wh
+        return center_x, center_y, center_x + text_w, center_y + text_h
+
+
+LABEL_ANNOTATOR = LabelAnnotator(text_padding=4,
+                                 text_scale=0.5,
+                                 text_thickness=1)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='YOLO-World Demo')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument(
+        '--work-dir',
+        help='the directory to save the file containing evaluation metrics',
+        default='output')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file. If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g.
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def run_image(runner, + image, + text, + max_num_boxes, + score_thr, + nms_thr, + image_path='./work_dirs/demo.png'): + # image.save(image_path) + texts = [[t.strip()] for t in text.split(',')] + [[' ']] + data_info = dict(img_id=0, img=np.array(image), texts=texts) + data_info = runner.pipeline(data_info) + data_batch = dict(inputs=data_info['inputs'].unsqueeze(0), + data_samples=[data_info['data_samples']]) + + with autocast(enabled=False), torch.no_grad(): + output = runner.model.test_step(data_batch)[0] + pred_instances = output.pred_instances + + keep = nms(pred_instances.bboxes, + pred_instances.scores, + iou_threshold=nms_thr) + pred_instances = pred_instances[keep] + pred_instances = pred_instances[pred_instances.scores.float() > score_thr] + + if len(pred_instances.scores) > max_num_boxes: + indices = pred_instances.scores.float().topk(max_num_boxes)[1] + pred_instances = pred_instances[indices] + + pred_instances = pred_instances.cpu().numpy() + if 'masks' in pred_instances: + masks = pred_instances['masks'] + else: + masks = None + detections = sv.Detections(xyxy=pred_instances['bboxes'], + class_id=pred_instances['labels'], + confidence=pred_instances['scores'], + mask=masks) + labels = [ + f"{texts[class_id][0]} {confidence:0.2f}" for class_id, confidence in + zip(detections.class_id, detections.confidence) + ] + + image = np.array(image) + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # Convert RGB to BGR + image = BOUNDING_BOX_ANNOTATOR.annotate(image, detections) + image = LABEL_ANNOTATOR.annotate(image, detections, labels=labels) + if masks is not None: + image = MASK_ANNOTATOR.annotate(image, detections) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB + image = Image.fromarray(image) + return image + + +def export_model(runner, text, max_num_boxes, score_thr, nms_thr): + + backend = EM.MMYOLOBackend.ONNXRUNTIME + postprocess_cfg = ConfigDict(pre_top_k=10 * max_num_boxes, + keep_top_k=max_num_boxes, + iou_threshold=nms_thr, + score_threshold=score_thr) + + base_model = runner.model + + texts = [[t.strip() for t in text.split(',')] + [' ']] + base_model.reparameterize(texts) + deploy_model = EM.DeployModel(baseModel=base_model, + backend=backend, + postprocess_cfg=postprocess_cfg) + deploy_model.eval() + + device = (next(iter(base_model.parameters()))).device + fake_input = torch.ones([1, 3, 640, 640], device=device) + deploy_model(fake_input) + + save_onnx_path = os.path.join( + args.work_dir, + os.path.basename(args.checkpoint).replace('pth', 'onnx')) + # export onnx + with BytesIO() as f: + output_names = ['num_dets', 'boxes', 'scores', 'labels'] + torch.onnx.export(deploy_model, + fake_input, + f, + input_names=['images'], + output_names=output_names, + opset_version=12) + f.seek(0) + onnx_model = onnx.load(f) + onnx.checker.check_model(onnx_model) + onnx_model, check = onnxsim.simplify(onnx_model) + onnx.save(onnx_model, save_onnx_path) + return gr.update(visible=True), save_onnx_path + + +def demo(runner, args): + with gr.Blocks(title="YOLO-World") as demo: + with gr.Row(): + gr.Markdown('
YOLO-World: Real-Time Open-Vocabulary ' + 'Object Detector
') + with gr.Row(): + with gr.Column(scale=0.3): + with gr.Row(): + image = gr.Image(type='pil', label='input image') + input_text = gr.Textbox( + lines=7, + label='Enter the classes to be detected, ' + 'separated by comma', + value=', '.join(CocoDataset.METAINFO['classes']), + elem_id='textbox') + with gr.Row(): + submit = gr.Button('Submit') + clear = gr.Button('Clear') + with gr.Row(): + export = gr.Button('Deploy and Export ONNX Model') + with gr.Row(): + gr.Markdown( + "It takes a few seconds to generate the ONNX file! YOLO-World-Seg (segmentation) is not supported now" + ) + out_download = gr.File(visible=False) + max_num_boxes = gr.Slider(minimum=1, + maximum=300, + value=100, + step=1, + interactive=True, + label='Maximum Number Boxes') + score_thr = gr.Slider(minimum=0, + maximum=1, + value=0.05, + step=0.001, + interactive=True, + label='Score Threshold') + nms_thr = gr.Slider(minimum=0, + maximum=1, + value=0.7, + step=0.001, + interactive=True, + label='NMS Threshold') + with gr.Column(scale=0.7): + output_image = gr.Image(type='pil', label='output image') + + submit.click(partial(run_image, runner), + [image, input_text, max_num_boxes, score_thr, nms_thr], + [output_image]) + clear.click(lambda: [None, '', None], None, + [image, input_text, output_image]) + + export.click(partial(export_model, runner), + [input_text, max_num_boxes, score_thr, nms_thr], + [out_download, out_download]) + + demo.launch(server_name='0.0.0.0', + server_port=8080) # port 80 does not work for me + + +if __name__ == '__main__': + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if args.work_dir is not None: + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + if 'runner_type' not in cfg: + runner = Runner.from_cfg(cfg) + else: + runner = RUNNERS.build(cfg) + + runner.call_hook('before_run') + runner.load_or_resume() + pipeline = cfg.test_dataloader.dataset.pipeline + pipeline[0].type = 'mmdet.LoadImageFromNDArray' + runner.pipeline = Compose(pipeline) + runner.model.eval() + demo(runner, args) diff --git a/models/YOLO-World/demo/image_demo.py b/models/YOLO-World/demo/image_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..20d225b638bb64d824a9d7e4bf13c609e61f0294 --- /dev/null +++ b/models/YOLO-World/demo/image_demo.py @@ -0,0 +1,220 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
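+# Image demo for YOLO-World (see demo/README.md above): loads the given config
+# and checkpoint, reparameterizes the model with the comma-separated (or .txt)
+# text prompts, runs inference on a single image or a directory of images, and
+# saves annotated results (and optional YOLO-format annotations) to --output-dir.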
+import os +import cv2 +import argparse +import os.path as osp + +import torch +from mmengine.config import Config, DictAction +from mmengine.runner.amp import autocast +from mmengine.dataset import Compose +from mmengine.utils import ProgressBar +from mmdet.apis import init_detector +from mmdet.utils import get_test_pipeline_cfg + +import supervision as sv + +BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator(thickness=1) +MASK_ANNOTATOR = sv.MaskAnnotator() + + +class LabelAnnotator(sv.LabelAnnotator): + + @staticmethod + def resolve_text_background_xyxy( + center_coordinates, + text_wh, + position, + ): + center_x, center_y = center_coordinates + text_w, text_h = text_wh + return center_x, center_y, center_x + text_w, center_y + text_h + + +LABEL_ANNOTATOR = LabelAnnotator(text_padding=4, + text_scale=0.5, + text_thickness=1) + + +def parse_args(): + parser = argparse.ArgumentParser(description='YOLO-World Demo') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('image', help='image path, include image file or dir.') + parser.add_argument( + 'text', + help= + 'text prompts, including categories separated by a comma or a txt file with each line as a prompt.' + ) + parser.add_argument('--topk', + default=100, + type=int, + help='keep topk predictions.') + parser.add_argument('--threshold', + default=0.1, + type=float, + help='confidence score threshold for predictions.') + parser.add_argument('--device', + default='cuda:0', + help='device used for inference.') + parser.add_argument('--show', + action='store_true', + help='show the detection results.') + parser.add_argument( + '--annotation', + action='store_true', + help='save the annotated detection results as yolo text format.') + parser.add_argument('--amp', + action='store_true', + help='use mixed precision for inference.') + parser.add_argument('--output-dir', + default='demo_outputs', + help='the directory to save outputs') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def inference_detector(model, + image, + texts, + test_pipeline, + max_dets=100, + score_thr=0.3, + output_dir='./work_dir', + use_amp=False, + show=False, + annotation=False): + data_info = dict(img_id=0, img_path=image, texts=texts) + data_info = test_pipeline(data_info) + data_batch = dict(inputs=data_info['inputs'].unsqueeze(0), + data_samples=[data_info['data_samples']]) + + with autocast(enabled=use_amp), torch.no_grad(): + output = model.test_step(data_batch)[0] + pred_instances = output.pred_instances + pred_instances = pred_instances[pred_instances.scores.float() > + score_thr] + + if len(pred_instances.scores) > max_dets: + indices = pred_instances.scores.float().topk(max_dets)[1] + pred_instances = pred_instances[indices] + + pred_instances = pred_instances.cpu().numpy() + + if 'masks' in pred_instances: + masks = pred_instances['masks'] + else: + masks = None + + detections = sv.Detections(xyxy=pred_instances['bboxes'], + class_id=pred_instances['labels'], + confidence=pred_instances['scores'], + mask=masks) + + labels = [ + f"{texts[class_id][0]} {confidence:0.2f}" for class_id, confidence in + zip(detections.class_id, detections.confidence) + ] + + # label images + image = cv2.imread(image_path) + anno_image = image.copy() + image = BOUNDING_BOX_ANNOTATOR.annotate(image, detections) + image = LABEL_ANNOTATOR.annotate(image, detections, labels=labels) + if masks is not None: + image = MASK_ANNOTATOR.annotate(image, detections) + cv2.imwrite(osp.join(output_dir, osp.basename(image_path)), image) + + if annotation: + images_dict = {} + annotations_dict = {} + + images_dict[osp.basename(image_path)] = anno_image + annotations_dict[osp.basename(image_path)] = detections + + ANNOTATIONS_DIRECTORY = os.makedirs(r"./annotations", exist_ok=True) + + MIN_IMAGE_AREA_PERCENTAGE = 0.002 + MAX_IMAGE_AREA_PERCENTAGE = 0.80 + APPROXIMATION_PERCENTAGE = 0.75 + + sv.DetectionDataset( + classes=texts, images=images_dict, + annotations=annotations_dict).as_yolo( + annotations_directory_path=ANNOTATIONS_DIRECTORY, + min_image_area_percentage=MIN_IMAGE_AREA_PERCENTAGE, + max_image_area_percentage=MAX_IMAGE_AREA_PERCENTAGE, + approximation_percentage=APPROXIMATION_PERCENTAGE) + + if show: + cv2.imshow('Image', image) # Provide window name + k = cv2.waitKey(0) + if k == 27: + # wait for ESC key to exit + cv2.destroyAllWindows() + + +if __name__ == '__main__': + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + # init model + cfg.load_from = args.checkpoint + model = init_detector(cfg, checkpoint=args.checkpoint, device=args.device) + + # init test pipeline + test_pipeline_cfg = get_test_pipeline_cfg(cfg=cfg) + # test_pipeline[0].type = 'mmdet.LoadImageFromNDArray' + test_pipeline = Compose(test_pipeline_cfg) + + if args.text.endswith('.txt'): + with open(args.text) as f: + lines = f.readlines() + texts = [[t.rstrip('\r\n')] for t in lines] + [[' ']] + else: + texts = [[t.strip()] for t in args.text.split(',')] + [[' ']] + + output_dir = args.output_dir + if not osp.exists(output_dir): + os.mkdir(output_dir) + + # load images + if not osp.isfile(args.image): + images = [ + osp.join(args.image, img) for img in os.listdir(args.image) + if 
img.endswith('.png') or img.endswith('.jpg') + ] + else: + images = [args.image] + + # reparameterize texts + model.reparameterize(texts) + progress_bar = ProgressBar(len(images)) + for image_path in images: + inference_detector(model, + image_path, + texts, + test_pipeline, + args.topk, + args.threshold, + output_dir=output_dir, + use_amp=args.amp, + show=args.show, + annotation=args.annotation) + progress_bar.update() diff --git a/models/YOLO-World/demo/inference.ipynb b/models/YOLO-World/demo/inference.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..19cc7b1480cc0b0e732762cc166cf52b568f17d2 --- /dev/null +++ b/models/YOLO-World/demo/inference.ipynb @@ -0,0 +1,2836 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "PorcLK9OylD6" + }, + "source": [ + " ![yolo_logo.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABYoAAAKsCAYAAABCuokkAACAAElEQVR42uzd+X+V1b238effOxWBMKgtgzi1CgjWWq2tnTxHBZRBexQBGQSBkGkPSXbmAGGewjyHMM8hCZlHvs/e97jWute9Q3sEiV4/vF/Z9lSrKeGHT7/nWv9PRP4fAAAAAAAAAOCXi28CAAAAAAAAADAU800AAAAAAAAAAIZiAAAwptFHj8T/PDAyIoNZ/mf1/wYAAAAAAEMxAAA/I71DQ3Kju0uuPOyU5rt3pLK1RYrPn5PkxQuy7fxZ+f70SUm1XJRM6yXJXL4kR7L/nuP378nDwUHpyf65g6OjDMgAAAAAAIZiAADGk77hYekeGpLDd25JsuW8fHF4v7xRm5G59dXyclW5TE6VypSs6eVJ5/OkZKlMSyezEjK7qlJmVJbLb7L+vne3fHpgn6QvtUjNlcvyoL9fOgYGGI0BAAAAAAzFAAA8i3LZiNs93XLuQZusPHZEPt63S16rqZTJqRKZVl4mBdmvk5MlzjAcVeaMxZOyX4PPyTLH5FTCMSNTIR807ZSvjx6Vfbduyq2eHhnm0hgAAAAAwFAMAMCz4eyDNqm/2irv7aiXt+qrZHp5qTsMp4qdodj97AnG4hKZpAzHuXE4HInDoThnYvA5IdPSKZlWnpJPD+yXtSdPyJ3e3qBzDAAAAAAAQzEAAE/Z9a6HUnvlkvx+e63MzCRlSjo3ChcbSgLBWJwbh5PhaByOw4ZUOBbrnxMyMZmQlyor5LWaaqm/ekVaOjoYiwEAAAAADMUAADwtuQvehqutsvTQPilIFSmKIyYrY/Fk22CsXRRHx+KJynXxREXuuniiYsmhg5JuaZEhchQAAAAAAIZiAACerIvtD2Tb+dMyI1Mm0ytKZEq6SKakil3pYm0wnqxcFfsZiqjS/N1iI0UxMSIci9+sq5PlR47Ivd5exmIAAAAAAEMxAABPws7rV2TZkX0yJVXkShcpn4uNwdhMTxTrY3FS7xZPMjIU5iN3ZnrCvSjWx+LnEwmZUVkpf9y5Q863tzMWAwAAAAAYigEA+LGMPHok1ZdbZF5DRqaX58bgbdpIPFZ+Qu0U6xmK0mAs1h+2K7G2iidaHrmzZSimplPyUkW57Ll5k7EYAAAAAMBQDADA/1XX4IDUX22VF8qLZFp5sUxNFwWCsTjtXxSbY/H/LUNhf+TOGImV62J1MJ6USjp/XNl6ibEYAAAAAMBQDADAf+pub4/UXb0kU1KFMjW9zeFeE3sXxdpQrGco7JfFxUqCQs9QFAQDsfHInUfLUBjXxXHN4txYPCNTKaUXLjAWAwAAAAAYigEA+E9G4uorLTKnOhGMxMFQHKQntmmt4gK/VRxcF7vDsNksnqzkKCYnY7rFylVxMBgrQ7EtQWEbjCclk/J6bY0UnjvHWAwAAAAAYCgGAOBxPRwYkN03r8rc+gptJPaHYvdzUTgYq6NxymwXF4/ZLdbGYUurWMtQpIwMhTYc+9fEeoYi98jdgsZGqbh0SQZHRhiMAQAAAAAMxQAA5JMbUls6HsiHu+pkarrQZaQn9PG4yJKhKA4et5titIrNsXiyZTC2ZShih2IvQzExcl2cMCTlb3v2yNF79xiLAQAAAAAMxQAA5HO/r1e+OLRbpqULHcFYnBuF1cHYl1Kui9VecfDAXXHey2I/PVFgTU9EH7jTR+N8GYqEdTBedPCg9AwNMRQDAAAAABiKAQCwGR4dlY2njwUjcWhbJEGhP2y3zegVbzMetnMvi6eMkaGIdIqToUiGIlUW8ofiVFlwWTwpZijOZSi+am6W3D8r/50DAAAAABiKAQAwbL/WKlPTWy3XxP5YXGgdjKeql8XpophWcb4MRUmE+ahd5Jo4uCiOvy6eZE1QJGRufb3UXrlCggIAAAAAwFAMAICq9WG7fNBUIy+W50Zh+1gcl6Hw0xO5r1qzODIc+8NwkWUodjMU0aE4fiyODMdKt1jNUEyyjMWfHTwg7QMDDMUAAAAAAIZiAAByeoYGJdVyVt6sS4cjcXlhJEGhDcXOFXGhJUNhpCdU6XActmUoJgeMDEXSGIyT5nDsj8OlzsN20V5x9IG7KemUrD15kqEYAAAAAMBQDACwG3k0KoMjwzKc/dozNCAPB/qkvb9X2vp7ZCD7r/cOD0luXM39e0cfPRr3Y+PN7i75eN92byQOqRkK21CsjcQp/XPsWJwKH7lzpPWhOLgoTloG45TbKlaHYmt6IhUOxZPyjMV/2tUk59vbGYsBAAAAAAzFAPBLlxuEuwcHpH9kSM6335WDt6/I3putkrh4TNac2C2rju+SFYcb5YOmpPxjT6UsPdQgnx+sl81nDknJ+WNy4PZV58+719st/sg83r4H286dkBcqCiNDcUgfiqdp3WKTd1HsZyiUwbhA6xbnf9jOnqBQHriL9IqN/IT6sF0q4eQnfP5QPDmVlMJz5xiKAQAAAAAMxQDwS9U50CcXO+7J3puX5OujO+XT/VXy/s4y+W3NZnmt5gd5pXqjzMpskJmV62VG1szKDTIj+8czsl9nZr7Pfv0++3/fKPPqi2R29usXhxpk7cl9crrttvPXHR4dlfFwbdzS+UD+vKtWXijfKtPLw3F4utoqLi90rounWrvF9gzF1NhOsf7AXUGeBIU5FhcYrWIzQ2F2is0MRTgSu5fFuT/+cPcuudDRwVgMAA
AAAGAoBoBfkra+bmm6flE2nNorbzcUyu9qf5CZletkhmZ98Nkfil0bNL/xVYRf36rbJu/vTMrGUwecy+TcpfHg6MgzOUTm0hrH7t2SN2oTzkg83RmGt2iDsa1VHM1QFObPUATXxf5wbE9PmGPxWBkK86E7a4bC2ioOMxS5q+LaK1cYigEAAAAADMUA8HOXu+59ONgvmdaT8s3RHfJ6zUaZXbVOZmbWOiOxI+MPw+uU4dgbiCuUz8Zw/BtzMHZG4++zX7+X328vk/eb0lLVekaO3Ln2TI6Ry4/slhfK/XF4i3dNbEtP2B652xYMxdOMZrE+HPuXxNsiF8X5xuLworhYz1AkQ5OMDIXz2WeMxX6GwhyKVzQfkQf9/YzFAAAAAACGYgD4ORoZdXvBdVdOy7+aG+SN2o0ys3KtK7NW+RwOxLr12nWxPxD/psJkDsXuWPzr7OdfV3zvfP54b5VsPns4eATvWXC67a58uKvGvSY25W0V589QWLvFMfmJKc4IbMlQpEus3WLtkjgZfp7kjMalUZZH7iYZGYqF2xvlZk83QzEAAAAAgKEYAH5ueocG5WLHXfn6aKMsaNgqszJrHTMrv3PG4VnqUOyMwmuNa2JzLI5eE7sXxd5YrA7FnmAornBH4zdqt8qfmsrl+L2bcr+v5ycdJkcePZI9N6/KixW5YXiLy8tO+EPxdPO6OLgmjhuKCyMZishYrI7GXoaiQBuNx8hQ2AZjJT0R7RWb3eLodfGrNdVyrv2BjIyDpjQAAAAAgKEYAPCYBkaGpOLScfnsQKXMqvxOZmW+C4ZilTsYr1OG4uhYPCNvhsIbis1msZGh+HWF7pXqLVJ64bi0dNz/yYbJ3GXzquMH5KXyreFIrIjNUKjpifJwLJ4WtIrtY/EU85G7lHllXDxmhsJ95K7Ewr8uNh+786+IS8doFidk45nTzgOE/PwAAAAAABiKAWCcGxoZkZ6hAVl5bLv8Ycc2dyAOhuLvnGviWZX6UKyPxesiGQrrWKzwr4n9oVgfjL8P+JfF6nC86vgeOXznmuT+vp/29yr3kN2SgzudPvEL/kVxuXpRvCX6sF36cR65s+cnwkfuioxusZKh8B65KzAH47SfnijWe8VJo1scXBOXKNfFZbEZivC6OCFfNh/hohgAAAAAwFAMAON+JB4dkdbOe/JVc73MqVorszPhQBx8rvSH47XOYDzTSs9QzMystzSL/ZFY6RYHl8TrLQ/buRkKdST2Py86UC+N1y5I99DAUx0pT7fdkbn1SWUgtozF5fpFsf2Ru8JIhmJanlaxzxmLY7rFBalipV2sdoqLI63igpjr4uBhu0ivWL8uzo3Ek7P+1LRTrnfTKQYAAAAAMBQDwLh29O5V+VdzvczOrJFZWbPVgdj5bKQnKtcG18W2wXiGkaHQ28X2ZnEwEhsP2/3aMhSrY/Hf9mScsbh9oE+e3vfrpsyu2iYvlG82GsVKgkLtFWvDcZiemGoMxmM/blfkZijS26K94mAoju8VmxkKdzgujc1QBI/cKaPxRKNZPMnrFJ9vf8BQDAAAAABgKAaA8WrvzRZZcaTWGYkjQ3HlmmAsDq6Jtc/GWJzRe8UznNE4T7M49pE7JUOhDMXqI3eqP+8ql53XWp7aI3fFF056Q7GXnkhbBmPnc9zDdltjWsUxY3EqmqGYomYozMfu4jIUynXx5GRMhsJ85M4zWb0oVjMUWS9XZ+TsAx60AwAAAAAwFAPAuLT/9iX54lBVMBKHvnMG41naZz1BMdMbi8NWcW4otnSLI83i9ZHBOHdNrD9yZ+N2in8d88jduzuSsvN6i3QO9D/RsTKX6dh69phzTewOxZuDz5EMRfDAnTsW+4/cTSs3EhTl4VCsPmw3LU+zOLguVprFBZHrYvtFsZ6hKDZaxeFY7F8RhxfGyiN3qXAwfqE8JZnWSzLCg3YAAAAAAIZiABhfTrXdkOWHa2RO1XfaSPxylT4YmxkKNz2hP3I305ahiH3kbr0yFivNYuWrk6Fw6BkK7bpYaxZvdL6+VVckR+/deKJjZa6HvOHUYXmpYouXnnCZD9vZHrkLWsXmUJz9+h9nKMzrYiVBYY7F+nVxiUWpdlE8SctQlCoZCv2ieGo6KVvPnWUkBgAAAAAwFAPAeHK1q03WnWyS39VukNmZ1fKyMwqvtlwW62NxZDA2MhTxD9ypjWLzuthMT/ijsXFRXGFmKCzXxZUbZUFjqZxvv/dER8svj+xWLok3O+kJ9bM1Q6EMxXqGojAYi+OH4pgMRTAYR9MT7nVx8WNcF+d52C4VNxSryrL/94RsPXtGBkZGGIsBAAAAAAzFADAetPf3SPrSUfl942Z5OTcSV61xhuKXrSNx2C0OMxS262LjsbvMuqBXrGYoZjxGhiLyyJ1lLHbH4Q2KcCx+o7ZQ1p88ILe6Hz6R0XJoZETMofgF7aLYyFAoj9xNS8dcF0ceuVMyFCllKE4VaiOxdlHsXxVrj9wVK1fG9sft9PSELUNRaigLx2IvPVGQSsjqE8eld3iIoRgAAAAAwFAMAM+6wZFhOXr3qvx9T5k7EjtD8WovN+FeFmvpCS1LoVwUWx+5W6tlKKKP3OkZihnGpbGtWfyb4HOebnGQoQgH43n1xbL/1hXJ9YR/7O9hx0Cf/HVPTZCesI3FL9jSE8p1cdAqjvAvivXr4mkx+Qn/YbvoI3dFyiN33nWx9WG76GWxfSwucTMUSeO62BuLC7I+2b9P+oeHGYoBAAAAAAzFAPCs6xjolX8117rjcEY32zNWhsIfiCPXxcojd/YERXy3WLsirtAH4yBD4Xw2rouVR+4cSoZiYWOZtHS0/ejDZd/wkHy8r05eqlAG4vQPRobCvyKOeeQuLkNhmOowH7mLtoojGYpUkTYaF+TpFsc1i7UERbIkoGcnXFPSCVl5rJmhGAAAAADAUAwA40Fl6zF5uWqVzKnyL4lXW0bjNcFobB+Kx3rkTr8unpmJbxbr7eL1sfkJ96J4fTgWV9gyFN9rj9y9VrNVtpw9Ip0D/T/qePlwcEBWHNklL5b/kLXZclWstoo3h/kJn39drGQo7INxeF0cdIpjMhThUGxpFqfUDEVx3rF4snldnLLnJyYllQyFNxZvOnOakRgAAAAAwFAMAM+6cw9uyYdNRfJK1Wp3KM6sUtITKjNDsTqmWxxeEkcfuVPyExl1LA7bxTOtQ/G6vGNxOBob6QnlkbtfV4TD8Xs7knKtq+NHHTAHR0fkq+bd2b/+Fm8oNgfjMEmh9YrNR+7S/mVxmKHQx+JCzdQgQ+EPwoV6szjIUHjXxKk8j9xFMhQlxmAck57wx2IjQfFiRUoKz52V4dFRxmIAAAAAAEMxADyrugb7Jd3SLG83fC9zqlY5V8WO3FhcFaYn9M/29ITeK14TZigqoxkKtVWsPXKXWWdkKNaHQ3FFtFWsZih+E9sr3qClJ3JmVm6SfzU3SftA3482YA6Njsrakwezf89bnZHYtTmPMDnxgmUo9h+5s/eKt0bG4mnaWGzpFquP3KXC9ISaoQhH4iLvyjiaodCui5PKaGxpFU8rT8iO6
9cYigEAAAAADMUA8CzrHhyQT/annJF4TpCe8Adj47pY6RXPVnrF+nD8XcQsYyyemTdDsc5ifeRRO5WenrBkKJTrYrVZ/KemcjnffvdHGzBzY2j91YvZ70ehMRTHjcbhNXHwyF1aeeQuNj9RGHSKow/bFUbSE9GxuCh6XZwq0sZje4KixNorVnMUwXWxMxSXyIxMWppuXH/mR+LRR4+cMXsk+zWnf3gk6CoPjozIEEM3AAAAADAUA8DPWarliLySG4gzq5Sx2B2MfWp+wr8ofjm2VbxGZlWuMa6LbfSL4miv2NYtXh8wh+IZtoftnNFY7xb/2huLc1fFMyo3OlfFuSHwx/p+Nt+5IXOqt3md4qyKzdYMhfuYndosjmYopjnXxVtdylA8vbww9rp4qsZPTxQGCQqtVZwyusWRsTi8LtZbxdFecYGtW5wsld/vaJAb3V3P1MjaMzQkORc7OmX/7Tuy++YtSV1qlTUnTjtWHj8pS48ck+9OnpZ1p87KlrMXpKr1qqRaWuVk2wO51PnQ+fP5/QMAAAAAGIoB4GfhZneH/Ku5VuZkvg0G4peVz+F1cbRVHGQoMmtiOsWKyjWx6Qlbs3iW0So2m8X5MhTBYByMxUqz2PLI3cd7q+V27483ZF5svy8f7sp4w7Br7AzFlvC6OK0/cOePxX6CIjoYF1qbxUGGIhUdjNVucfRhO/2Ru3wP2032PmvdYqVXXJD1dmOt3O3t/clH1dx18IWODjl8566sPnFK/rJ7ryxs3Clzquuyf5+VMq08I1OzJiTK5bmsydl/7blEhfyqrFyez36dkPVCebW8WtMov8nUyfLm47Lh9Dlp7XwoV7u6xb9K5vcVAAAAAGAoBoBxp7XznrzdsEHmVH2rXRO/nAm/RoWXxfrDdnHd4u+U0VjNUKy1ZCjCr4+foVinJyiCC+NwJI575C53WfxazRZpvHbhRxv4cimPFUd2hRfFkaHYNhor6Qm1WaxkKNTrYlu3eGqeR+6sCYqUjdErVh62sz9yVxK5LA6HYjdDsf7UiR/1YvvfkfvPvdndI1WXL2f/Ps7IvMbt8lJlVfafo1ImJNLynKrM/1we8atAbjiuyP57K5zPBakqmZmpl7/uOSBbz16U613d8qC/n7EYAAAAABiKAWD8yF0/bj27R16vWRMZirXROLgsdhvF5iN3syPdYvtYHM1QrNWui4NWsUl72M4cjC294sgjdxv0XnHFBr1XnP1ceK5ZBkd/nDFzKPvX+eFsc/afdauXnvghvC6u2Gy9LHYzFLlBWP2sDsVKtzgYi+2mOqJDcfwjd8oVcdocim0ZivhusZaeSJY4F8UlF8499eE0l4Vo7++X706elL/t2ScvVWRkQjIlzyfTMiGRckbiyFBs+FVZOBrnroqdkdjiv4LhuFJ+v2O3LDtyXJrv3s/+eqJpDAAAAAAMxQAwDvQOD8rKY/XuSOzkJr5VPptD8apIq9gdjVd718T5x+JZRq94tqVVrF4UzxqjVTxjjLF4hnNF7I/GlqHYaxXnzKj8Xv68q1xudHf+ePmJjjZ5Z3vKGYnDVrF6YWxPUEz3aBkKbTTO88BduXpJvNXSKy40MhSWsTgmQ+FfF4+dodAH4/kNtXK/r++pDaa5C+K2vn7ZdOasfLhrt0wtr/CGYVNaJiTTwWCsjsZOeqIselkcjMVl6meP8nlKulr+q6xS1p48K7VXnv1H/AAAAACAoRgAfuHOPbglb9aulVe8gdh50M4yFpsZCu26WBuO1ygZivhH7iKP2tl6xZW2DMVae6s4JkPhP2w3I5Ke0IfjXKf4zbptcrrt9o826t3t7ZYPmzLykvOQ3aZIhiL3qJ17XZynVxx55C7aLY5kKGKaxe41cTRD4Y/F0V7xNmM4tqQn0iWRwXiyMhJPKy+T5UcOyp3enqcyluY6yEfu3JH3dzZl//vNyPPOBXFWwhUZirNfnzOui/Ur43ILcyguDwbi/1LkrotzX99v2icrj51iLAYAAAAAhmIAeDaNPBqV5ruXZWHj985QHFqlZyiMwfhl78LYvyaebWQowlZx/kfuZpsP21V+Z81QzDIftrNkKPSheL3lkbsN2mgcSVFUfi+v12yV5MUTMvzox8kF5C5ba66ck1eqt3nj8CZLrzjfA3dbot1i47o4fOTOJu6Ru23xvWKtXVykKQgetlM/266Lw4vi3FBcc/nSEx9JcwmVG93dsuzIEZnb0CDPJ5LyfNIXDsW5z8FQHCQoUt44nIrmJ8rixmJvMFaH4kR0LHZVyms12+Wf+w4zFgMAAAAAQzEAPJu2nNkjr1Wv8i6KVwaXxblx+JUgNxHXLl6tPWwXZCgy+iN3+VrFtgxF8LBdZVyzeN0Yj9zFd4sDxkick8tPLD3cKAMjwz/aoHeq7ba8UVssL1nSE2GrOH4s1jIUxsN2ZnrC/VxoHYvNDEXYKs43HNszFAVahqLIcl0c9or/uW+XtD3h7ETHwICcb2+X+Y0N2e9phTsS+5Lu1wmJZHBdPMEcjLX0hHJdXGYfjP1H7bTr4kSFNUOhjsUvVtTJvPomxmIAAAAAYCgGgGdL12CfrDne4F0RrwwuiufEChMUuZFYpWUolLE4HIpXG9fE4edZ+ZrF1vSEPUMxM7M+/BzTLDavi92hOByNPz1QK3d7u360MS83Om89ezT7n+HlJyqiY7GToMiboVDTE+HnaWqv2PscuSouN6+JCy2P3BVGHrZTTbG0i/VecZHRKnbH4llVKVl36rj0Dg89sXG0rb9fMq2tsnB7o0xMJgPPJxLhRbEyGk9wRuOYXnEi2iueEHNdHKQnVOZ1sZee+FVZpfe5Mvv3ViUvVNQyFgMAAAAAQzEAPDvu93XJh01b5VXvoviV6m+NBMW3kUfuXjYzFF52Yo72sJ35yF18hmKWMRa72Ql/KLZ0iy0Zihmx3WL/knhddDCOaRX/qSktt3oe/qhD3t6bV+T1miJ5SRmJzQxFOBLHZyime4PxC0arWO8WhyPx9GAkNgbjcnMo9i+ICyMZiilqhiJyXVxs7xZ7Q/GvKxPS0tnxxEbRrsFBKTp/zhuJEx4/N6F8VjMUiXxjsZqhCIfiCbHpCf3COLwoLtcuip2huCwcjf9Lwe9DAAAAAMBQDAA/uba+bvnHnhLvmnildlUcvS6OZijUoTj4nDEvin3qI3dhhmKW+tnyyJ2eoTCuizPrrPmJ+IftohmK8LrYHYxnZTZKS8f9H33A++HMEXmxYpM7FjsD8Sb9sjhvs9jvFG+ONovL/UviLUqGYstj9YrHylAEY3HK3iwOMxS2sbhEKlsvPrEhdOTRI9l45rQs0EZil3tNnFCui5VWsSGaoVDTEynjmtj+yF2YolCyE2qGwtIs9hWkqxmLAQAAAIChGAB+Wpc678gHOzcbQ7FrjjEUWx+5i7SKV0VaxWNlKMxucZihWKtdF6sP24VDsdEr1h65Wx8MxTONgVgbi4OLYjdDsbCxRC503PvRx7vmuzfk/aYKZywOBuOKuGZx9Lp4euS6eEt4XVxudIutGQr3sniqozCPbfkzFHGP3BkZikUH
90pLR/sTG0E3nz0jc+vrZJIxEqtjcXhRHGYoJgQZCvNhO8tQbFwXP5cnQ6H2ioNH7rRmsXlR7A7GBalqebV2B2MxAAAAADAUA8BPp7XzrsxvWBeMw68G6YmVkatiPUOxyugVe3/st4qNDIX+sN3qSH5Cvy6OaRUrn2farouDa+K1MZfFlqtiNUOhDMWtnW1PZLjbdPqIe0ns00ZiczCOZiimqw/cpdVusfHIXTr62F1chkIbiVMxGQpvMJ4S0yv2MxT+SPxWXUbKLp6T3NXvk/g+VrReknkNdTIxWeZJxAiTExMjveKUNUPxfDId88idn6Io1y6KfxXXLVbTE0aG4r+MDMVLFXXy0Z6DjMUAAAAAwFAMAD+NC+235S+7tsorVd/kzU/YMxSWBEXGcl1cFbaK/fzE7BjaUKxlKNZahuO4DMVa74rY7BXnG403BGZXbZRjd288kdGura9Hlh9p0sdiv1VcEWYnzOviF/NkKLSxWH3kLtcoTtsyFIV5MxTWBEWQoSgKMhRT1aE4XRyYVZWUzw7slo6B/ifyPdxzM3eZvcO7JC4zqNfEynWxOharGQp/NHZG4Wi3+Lm818UxveIyL0URjMbuUPxfwVCsJyj8sfiVmu2SarnMWAwAAAAADMUA8PSdfXBTPmzaYk1PaA/bVatjsTsOv2LkJ9xhOK5drD5sZz5yt8aaodAeuPMfufOoGYpgLM6s8x65W6c8cqdmKGyP3IWXxX6v+NXqzXLkzrUnMtgNjY5I3ZXz8ocd5crDdpu0BMULkV5xvkfuzPSEOhpvtXSKoxkK7ZG7SHYi7pG7ImU09q+Li7P/ucXyZl1GLnU+meTEmQdt8j/798n0ipRMSpZ5EtbLYnUsVtMT2udgME55Y3Hcw3ap4GG7aKfYNhpXRDMUylD8q0ir2P38Zn2TnG/vZCwGAAAAAIZiAHi6HvR3B0Pxq9Wu6GhsXhOrrWJbrzjPUJzJk6Go+s7aLLZnKMJmsd4rHuuRu/XxD9t5l8Xz6rfJ5YcPZPQJZRNyNp9pzv4zbPEetttkaRWro/Fmy0DsZyi2BBkKa6vYG4vVzzZTlUvj/L1iI0OhDsXeZfGB20/mGvtub4+sO3lcZlVVOAPxxGSpMhbH5yfUh+2cz4lkVDK8KFa7xW6GIh3NUJTFtYrT0etiJUPxq9gMheu5REYWNO6WG909jMUAAAAAwFAMAE/PrZ52+evuQnm16ht3KK7yVI89FsdlKMJ28Wo3Q2FSMhSzg+FYTU9EMxRxY3F8r1jvFKsZipkxY7Gfnvht7RY5ef/mEx/qNp4+bOkVK6NxhTsSOyrCgdgZjtPKI3dqq9i4KNY/h6NxNEMRjsWRoThVqD1sN0X7rEtcPPdEvm8DIyNy4PYtWdBYJ5NSuWG4VBmIQ+F1ccJ45C4ZCNMT+lgcDMWRDEV0KJ4QuS72xuGydPCwndYrVjMUATc74T5yF5qZaZCt5y5K58AgYzEAAAAAMBQDwNNxs7td/rGnKBiKw0ft/Ift4jMU5lD8svHI3ctVHv+iOPLIXdgsjs9QrNHTE9rntVbONXHGT0/YHrbL3yr+YGdCzrTdfuIjXcdAn2w8fUherNjksjxyF/SKx7wu3hJmKNJbLI/cha3iyHVxub1XHGYozEfuvGax/9m7KF536qj0DQ8/ke9b58CALDq4zx2Jfdo1sT8UGwmKRJkxFkcfuYs+cBeXoIhmKPI1i/1x2E9QmGOxmaFQr4vfbtwtt3v7nuhVOwAAAAAwFAMAAr1DA7LkYMp5zM69KP7G8rCd/ZG7OcHnVdb8hPnInTYSe8Px7NhH7r7Te8WVMYOx5brY/bwuNkMx1lj8xx0Jae/vfSoD3ZWH7fL96UPyUsUmS4ZikzYSR5vF9l5xtFvsjcNp75G78nzdYnUojm8WB4Nx1qxMmaw8dviJfr/KLp6XAu+SeHKqNLwoNgbjSZGH7aLdYrddrGQo1LHYaBarGQrzkTs9PRHNUDxXZgzHfqtYzVBoD9vp3eLFB4/Kw0GuigEAAACAoRgAnpLVx+vk1epvgkaxeV1sM8fxrZGksI3Fq/UMhXJdPNvSLQ7bxWO1im2P3MVkKPwLY+Vhu1x+wqeOxLMyG+R/9lVJW9/Ta8ReaL8vG04fkleqC51x+CU1PVFhuyjWB+Pp6mWxkqGIaxa7o7EtQ7E1MhRPtV0XK+mJt+oqZOXxJzsSn33QJu83bZfJSX8gVukZikmpRHBZPCmmWRxeFCuP3CX0JMWERNwjd3qGIvaRuzJ3KDYzFOrDdvpgXOldFIdjce6q+Hx7h4xwVQwAAAAADMUA8KT1Dw9J4uJB+W3NKm8s/ibsFWv0DMUcW7dYS098G+kVBymKKluGIm4ojo7Gs4LheG3sZXFkKM5+fdwMxZazh576MJcbizedOSxz68u8DEX0YbsXjfREbIYi7Y7H7iN3SooirfeKQ4XhSFweTVBEeBmKd7ZXywYnNzH0xL5fvdm/dmVrS/bXR7kzFOeuicOLYnUotjeLJ8Y+cucNxUG3WLku1rrFMQmKZDpPszjsFUczFEarWHvkLhyK/QzFiuYTjMQAAAAAwFAMAE/HmbYb8u7274Oh2MxQaNfF1fEZCusjdxkjP/HYj9wZneJK/fNjZSg8s4Jr4rV5EhTuWPx6zQ9Sc/nMT9KGvfqwQzKtZ+SDpgqlV2xeFIcJihcjY/EW7fP08ugjd056Iu1fFOsZinAw3hozFm8Lrov/srteGq62Snt//xP9Pg2MDMtf9zTJpGRJdCh2PivZiTyP3OW7LA4euUuq6Qnva9J82M68Lh67VeyPw+Zo7A7E5dYMhX9hvGD7bjnzoJ2xGAAAAAAYigHgaQyU9+X9nZuMi+JvlF6xvVs8x3zkznvUbo7RLX6cR+7UDIU6FL9svSheo2colLF4pvLIXZCg8AXpCbVZvF4bi+fVF8qtns6fbJh70N8r+29dlU/3N7gZCuVhuxfLHyNDkY4+chflP2yXr1dsf+Tut7Vp+e/9O+VOb490Dz35fm7t1csyp7pCJjtDcYk7FCfVoVi/JtYEGYqEl6Eo01rFweegUZwwesUp7ZG7fA/cqRmK5yyjcTgUVygDsj1DEV4Uu5+3nrvIUAwAAAAADMUA8OQNDA/JptM75fWalc5A/Jo/GFfrCQpnLK6OPm5nzVDENItf9rvFQa84fORudkR8fsJsFjsZCnMwzg3FSrdYvyjWr4v9wfiTfRm51vXTXnD2Dg1KS8d9WX/qYPZ7U+iIz1DEP3A3PRiOw15xXLc4biye6l0X/7qiWP62p1GKz5+W9v6+p/L96RwckO9Pn5QCZxzOjcQl3jDsXRdbe8W5Mbg0eORuYvCvuaNwXLfYf+ROzU74reIJ2nWxSxuOkzEJiiA9kZZfWTIUv0p48mQocoPxP/Ydkju9vcLvVQAAAADAUAwAT1ztlePyatXXykBsH4tftaYnVgZjcSQ/YbkmVh+5ezmSoVhjPHJnyVDkuy5WMxSVtl6xPhr7l8S5z7My62XDqb3PxCA38mhUhkdH5eDta/Lt8X3yx53l7iN3lsH4hTyt4iB
DoTxyp3WLvYftppsP2wXXxFvlo931subEYekY6JfeoaGn9v3JXS3/ti7jDcTeNbH22egVG+3i3GCsD8VllrE4GclQaNfF/lCcCD//+xmK8HOYnTBVxA7GOQdu32UoBgAAAACGYgB48q48vCd/2bVFH4urwoft/PREZCzW0hPhVXGYntCvi91GsZKeUK+L1cftLBkK87J4ljcazzIyFPp1sdotXhcZit2xeK0zFr+7vUSO37/xk/SJ87nX2y0XO+7L6hP7s3+P6ew/29aYDIXNlrBX7I/FaZ2boQiH4he9VvHf9jTI96ePSkvHA8m1gp/mP3Puv4Om61fljVp1KPYko2OxMxh7Q7HaLZ74GI/cqRkKvVVsdotTwYXx2BkKczgut2YowuviaK/YN62iRkovXJKRZ+zXJQAAAAAwFAPAz1BumPvmaLWTnbCmJ2wP21VFMxT6pfGq2AzFyxkb9ZJYfdjOvzS29IorlYti6yN3a7UMhX5dHKYnctfEf9tTLje6O57JMS53YZz72nz3pmRaz8qfmjLyRm2xMxK/VLFZyVDEj8UvGOmJF/z0RNr9/Ep1afZ7WSKLDzZl/zPOy9Uu93vxUwznI6OjsvnsKZmSLpUCZxwutg7FuQzFpKTtuth85K4s6BWH18XKWKzyExS2sVgRZijS2lj8nDEY6xkKG++KOFFhfdhuQqJS3t2xR9r7BxiKAQAAADAU800AgCerf3hIGq6ekHe2r49JT4Sf/WE4bjSeY/SK9eviVRarjVbxKm0wju8Vm4/cKd3iSn8sdq+LZzpsveK1wVBce+XMM3dNbNOb/e+qL6vy0ln54Uyz/HlXlXywMyOv1bjD8axMoTMca1fGabdZnBuKX8yaUbnNGYtfqy6Vj3bXyZKDTVJ3tUVOtd2Vtv6fvoeby0789/5dzkisiqQnnKG4RB+JLd1itVfsPnJn4ycnfOrDdkqGQrkutj9yF5eh0Adj/5pYuy5WR+OEl54oq5CFjbvkdg+dYgAAAADgmwAAT0Fbf5f8z74SebX6a1dVNEHhXxdbm8XV5sN2K41WsXFdnLENxtEMRTgUx43G3wUJivDS2JaeMC6Kg6F4rSw6UC2XOu+PuyFuYGRE7vb2yL2+Htl147LUXrkgq4/vlzUnDso3x/bJx/saZPHBHbIka1HWssO7ZGlW8fmTUn+1RU7cvy1tfb3SMzQouSveZ+Wfayj79/Lu9jpvHHaviWPHYqNX7KQnnCti85E7PUER97BdpFesDsbGI3eRkTjmYbsJ+cbi4FG7mAxF9o9fqWmkUwwAAAAADMUA8LTGuRE5dLtF/rhjo9EqjnaLzQyF/ZE7y+N2/mVxbjjO6BkK9/Nqo13sCTIUa7QMRfiw3ZqY9IR7TWwbip1mcfbr72p/kE2n98ngU+7wPqmESK4nnBtauwYHJHd9nHuE7n5fj+RG5WHvX3/WL6eP3r0jr9dUSEGq2DHZ+eqnJ8wMRalFmT1DEdMpjjaL/aHY7BanItfFE4J2cfSy+DknRRH3yF25tV38K7Vd7I3FuYvj8ktXGIoBAAAAMBTzTQCAp+NmT7t8sr9UflvzrdEqVjIUtkfuzFax5ZG7V/JkKF7OmO1ipVWstYu9sTiSnoheF2sJCi1DoY/GCxoK5VZPJyPcM6TpxrVgJC7wRmL1mrhAaxV7n5VesT8Sa9fFxmA8KbgqVgbjhD4YP5/3gTs1Q5GyZCjSQbP4OTNBkRuHY7vFRoYi+/n5ZKVsPnuBX6MAAAAAGIr5JgDA03Pq/jV5d/t6ea36a3csrvo6zyN3IfNROz1DYbkqtvSKXw5GY30gtmco1hiP3H1nFbkuNh61a7pxQYYfPTvZhV+63NVz8uJ5mZlJRYbifM3iScp1sf/IXUTMdXFchsK/LtYeuVMlw2ti85G7uAyF6ldGhuI5JUOhPnI3OZWRzw4ckf6REX6dAgAAAGAoBgA8Hff7uqTo3B5Z2LjOGYsjGYoq9brYY7SKzQzFnDwZipcz30Zaxb4gQeGkJ2yDse1hOzdDETxspz5yp3aLK9fKupO75XpXO+PbM5bPWH28WV6oKJWCVJFxWVzspCci18WWbvEkX55H7tSheNJj9IrV6+IJlkfuovmJ6ON2rvLY6+IwO+FnKNyr4kUHm6VvmKEYAAAAAEMxAOAputh+S/6xZ5u84QzCX4fXxEF2ImwVm73iSKu42h2GraNxJnpZHF4Ur4o8bKc/crfGapbSLtYyFBklQZH12f6MHL5zheHtGdMzNCQbTh2TKcZAHBmMlUfuwgyF+qidpVvsXxSnyoxH7hKRbrF7Tex9dkTHYv2Ru7hWcbRX/FzwyF35GL3isFW8+FAzv1YBAAAAMBTzTQCAp+9C+y15d/uGYChW0xOvOekJdTT28xPfWMZiPUPxSuSqWB+Jo71ivVsctov19ERchsL2yN0fdxZLquWY5B7w47/rZ0v/yLCsPtEsU9PhKDwlXWzNUGgXxUqzWM9Q+NkJ/5G7fK3ismh+wpfwx2I1SRGOxIGEJ5nWxuLnlOvi58pSxsN2MQ/cKRmKv+89KA8HB/n1CgAAAIChGADwlAe74SE5cPuivLdjYzgUV32d55E7S4bCoF4TvxK0itVH7sxmsX9FrF8Xz1YeusufoVAGY8/c+s3y9dHt0t7fy+j2DBp59EjWnTom05xxuCjmojiuVewPxPah2ExPmNfFfoJikrVVrD5yZ1wT+2OxLUORjGYonsunLB1eFJeFQ/GSQ83yoL+fX7MAAAAAGIoBAE/fnd5OKbtwQN7fudG5LA5UqRkK9bp4pcaWophjDMb5MhR+s/hlVZChWKONxmNlKHLeqP1ePtpVJq2d9xncnlF9w0NScuGMTHFG4qLYsXiyx01PFFvGYjNDURbbKh4rQxG9MjbSE0qreIIlQxFeF8c/cDdBS09Em8WfH2qWwVEeXQQAAADAUAwA+Inc6Hoga082yB92fK9nKLRH7kL+MGx/5O7bmARFNEdhzVCoY3HwyN2aSIZidtV3kevi16vXy9/2JOTY3WuMbc+wodFRqb58SV6sKHNG4imR9IQ5GPvpCfWzMhYno8NxZChOqQ/blSlDsTkYu+mJibZWsZGhcIfhpJOhMJvFz8VeF9t7xROTlfJl83HpHR7m1y4AAAAAhmIAwE95WdwhXx+tlvn13wVDsdYttj5ytzIP85G7VTHXxH6CwnzYTs9QqIOx7bJ4TtVa+eJQtRy83crQNg7UXrkk0730hDMUp7zPaXuGYrKaoVBaxc5njzoQa9fFqWiGInJNnCgbO0OhXhdrreLow3bqWGxLT5hjce7auPh8iwyPPuLXLwAAAACGYgDAT6trsE9WHKmQ39Z+GyQowutipVWcNz9hz1C8kueyOBiK/QyF/7CdmqHQEhSrI9fE3xxrlCN3rjCyjRP7b92QFytKnPzElLSaoCiKaRXn6RYnw27xJGMwDq+KSy0JirJIq1i9LA6GYn8sTugmJPTBOGwWp6294gnBSKxfF+cyFNPLq6T4Qgu/fgEAAAAwFPNNAIBnx/pTjfLu9g36o3ZKhuKVKr1XHK
QoIhmKcDR+vGbxao0tQ6EPxaudC+P1J3fJyfs3GNnGkZs9XTK/scoZiV3hdXFBqjhPt9i4LrY0i6MJirJIisKeobCNxaHwUTtzMA4zFNHL4mi7WBuKvevi2dV12V/DbTL6iItiAAAAAAzFAIBnKQ1w+Zh8sq9E3qpbHY7FkQzFSi1D4Q/FrxojsdorDjIUGbVV/G3kstiljsT6I3e5ofjd7Vul5Pwhudfbxbg2ztzu6Zbfb68JLoqdr1p6Qh2NbY/cGekJ7bPtkTs9PWFeF6tD8fORDEXS3ixOhM1ifyS29YrV4dj2sF3u828qa+Tsg3Z+HQMAAABgKOabAADPnqN3L8vq43Xyxx0bg5HYzFCo18W2bvEcLUOhJyjiusXBw3aBcCie433+8kitNF49w7A2TnUODMjHe3fKC+Ul4Ujs8RMU0UfuohmKyYZJzlhc6pjkjcaRh+0sQ3GYoSizpiie93nDsToSu/mJmAxF5GG7lNEqdi+K/7nvgNzv6+fXMwAAAACGYr4JAH7JRh+NPrMD0bWu+7Lz+mn5/GBKfle7Ss9RVEe7xcFYXL3SSE/YW8XuWPytZTD28xPhI3dzqnJXxFuk9MIhudx5n1FtnCs6f1qmpsOL4oLIWJwvQ6EPxZEMhT8Yp+J6xeYjdwlnKPZZx2L1YTunXRzNUPgP20UfudMzFOrjdhOT5bLk0BEZITsBAAAAAAzFAH6e42//8KAz/HQMdEtr562s23Kx/YbsuXlKdl0/KQdunXU+n2275vzrN7rvy0j2z+sa7JVnaTzuy/5z9I8MSaa1Wf7VnJG3G9bqGYo8j9y5GYpvrdfFtlbxy1q32B2K36hZJ6/XrJXCc/vl0J1WxrSfiYarrc5IPFVLTyijcdqenvCH4slehkIdjQssreJohkJtFZdaEhT5H7nLNxRPCFjSE8lorzgn95Ddnpu36BMDAAAAAEMxgJ+Th4O90tJxU07dvyxlF3bJmuOVsvjAVvnb7g3yYVPuIvZbeW/HKvl94zcyt/ZL+UP2j//U9J28k/3jT/dvkeWHS+X7U3VSeemA7Lt5Vu71dcqD/menwXvl4T05fu+KrDhSmf1nKnSG4tecYdh/5G6lJUPxbYTtuniOd12cuyCeU71a5jV8LwsbNsnWs/vk8J3L0ucN7/h5uN79UN7ZXi1TUtsc6nWxk53w0hPqZ3MoDkbiZLHxsF1JkJ5Qh+LoNXFpMBZPtIoZi5V2cdgtVjIUMZ1iM0OR+/pGbYOcuN/Gr20AAAAAYCgGMJ4Nj44417+5i+Gq1v2y5kSF/GnnKnlv+0p5s3ZZ1nL3a43/ebn8riZL+7xC81bdV44Pdq6VPzetl3UnayTTmnu0rVM6B3rkp/9nHnUun3ODccPVk/LPvcXykTIav1a9MtIqjstQhOkJdyh+q25d9q9VLJ8frJD6K6fkUuddeTjYx4j2M9Q3PCyfHNgVDMUue4Yi/rq4xN4rVh62yw3G0aE4zyN3KX8QNh+5e4wMhdIp1tITkQxF2Cpe0XyMX98AAAAAwFAMYLwaeTQiuaTE8XstsvZEpXyyf5PMrV0mc+uWy1vZr6HlwUBs+p3PH4trPP5o7Hz+0jG//mtZdGCbbD7TKCfvX3ayFs9KnuJe70Np6bgjFZeOyKZTO+Wfe0qyf69J+ePOH+Sdxu/lrbrvnJF4Xv06mVeXy0jkBuG18vvtG+UPO36QD5sK5csj1VJy/oAzDl992CbdQ/0yMDLEgPYzNjQ6KtWXW2ROddK9JA4uircFGQr1YTt7q1gfiyOtYuO6WO8Vl8ZmKPI9bOc/bjcx0icO8xPhUJy0t4qT7kj8SnWdNF67LoOjo/xaBwAAAACGYgDjza3uNmcgXnaoUP62e528VbtU4Q7EbypjcXhZbFrhXRavcD8r18W/rQmH49/WfOlZIQsavpF3Gr+VonNNcur+FWdQfRa+J35fNdcy7hrsk1s97XL2wU259vC+HLh9UXbfOOeounxMdlw7I7uvn5O9Ny84F8O3ezrkfl+XDI4MM5b9wlzreijzGzIyJa2kJ7TROMxQFEQeuQtNjr0uju8V28dis1esXhfr+YkgPeGNxBON6+JgMI5pFT+f/Tqnuk5aOx/y6x4AAAAAGIoBjCcdA11ypu2yLDqwWT7atUbm1i31LHNH4tzXutwovNQYiY2xuGaZdTQOBmPtojgcjn+rjcZfyuIDRVJ8fpdc77r/zA5N6gNdz9IDfXg29A4NydZzJ2Vmpkymprdp18RTtEfu4lrF0bHYfeTOnqHQecmJLDNDMTFZZn3kLpqhSIbXxTa2DIWi7GILPxMAAAAAwFAMYDxp7bwp6Zbd8qedK+Xt+uXOQPxW7RfBWJwbioPBWBmK1bH4rTyDsZ+hiIzFTnpC+axcF+e+/nHHd/KXXRvk8J2LTjeY/64w3hy/d0derUkpQ/E2fSS2ZCimqCNxuiTyyN1kW7c4uCjWMxTuSGw+cleWJ0NRpiUoIhmKZJih0B65S+qD8V/37pPTbQ/4mQUAAAAAhmIA40H3UK9caL8mi/Zvkvd3fCNz675w1X7hDMV6dsJQt8zoFfuDsdIptjxyF3SLa6IZitxY7F8U+6Pxm7VfZb9+JRWXDkjHQI8MMxhjHOkbHpKS82fkxYoSJz/hC8Zi5bPeKy4yronV6+ISy1hcajxyl79ZHHnkLq5XnAgvi/VH7pJahmKCcl38m8oqWXvytAzRJgYAAAAAhmIA48ORO+fkgx1fy3zncvgLmed9dS1VLoq/kLfqlkZ6xW/GXBfHPW4XXBPXWB65C5rFK7ShOPSVrD9ZK9e67jmP3fHfH8aLY/fuyB921Mg0ZxTeprBnKMZ65M7MT4zVLQ6GY++6eKLRLY42ixPWZrGWobA8cjfBG4pfq6mT2z09/IwCAAAAAEMxgPGg8eph+euuVTLPuSD+XBmIv7COxXPrbNfE6mhsYw7Fy7X0xJtaq/jLmEfuQm9kLT+ckCsP7zIWY1zJXL4o09T8hC1DoT12VxzNUHjpieBzslhvFceMxZM8YatYz1BMfMzB+HltNFbSE4mkkp5Iyv5bt0XtdwMAAAAAGIoBPKOqWvfKx3vWOiOxMxTXfa6PxbUe/5q41hiLg0fuPFqGYrmXn9DH4twFsf7ZeNQuGInda2I1Q2H6ZF+htHbekcHRYcYojAu3e7rl2+OHnbHYSU+klKti/5E7k/KwnTkYuywP22mP3JUavWKlWZwy0hOpRNArHjtDkTQ+h6PxhtOnpa2/n59LAAAAAGAoBvCsS1zYIZ/u2yDz6j73fKHxx2I1QxE8bJe3VbxUG4vjMhRBdsLvFFvSE+HDdka32OkVu5//vGuDtHTcZJDCuLH/1nX5aHeDvFhR7DxuN9VMTxiP3Kmt4ilGfsIfisPR2OcPwiVGesJsF/vXxHq3+HEyFG6r2JMIMxT/s3+/HL9/n59JAAAAAGAoBvCsKz7fIEsObFJGYpd/TWyOxZEEhTcOz62zP3D3ptEqDh+20x+5e1PNUPit4qBdrLaKV1haxaEFDd/K2bZrDFMYN3ZcvyyvVCeda+KpynWx9
sidn55w8hNhhsLWKrZmKPI0i4OhOGV/4M7MUMReF3vXxP5w/PsdO6TmyhUZ4QE7AAAAAGAoBvBsy7Tukf9tLpL5xkgcjMUqP0OhJCiilhlXxUuVR+2WjtksjgzGlgxFOBSHj9yp5tZ9LZ/u2yYX2rksxvix/dplmZIq9IZipVWsZiiCTnE0Q2F/3M545E7JUPjXxPZmsTcUp8ryPHJXFn9dnPV6ba2sO3VKHg7SDQcAAAAAhmIAz7RdN47KptMZmV//uTMUx43F6sN2kUfuvNH4raBdbLku1lrFy4xH7uzdYr9XrD9sp/oyuCq2dYvf3b5ais7tkju9HYxUGBeGRkdl982rMjtT5g7FabNXHD5yVxCkKIrdz9bB2H/kriT/I3e5TrHtolh95E4biv1BWP9jPz2RG4pfrqqSL5ub5UZ3Nz9/AAAAAMBQDOBZV3lptyyozw3BS7yReIny2T4YRzMUS2MfuQvzE8bDdsYjd7Zr4vBhu/hu8ViP3L3TuEr23TrLUIVxo2twQPbevCZz6yuCy+Lwkbui8LPlkbu4y2KzV2xLUOSuiyclx8hQpPzshJ6emGRcE8/IZGT1iRPS0sH/SAMAAAAADMUAnnmn21rlTzv/V+Y7w/AS56pYH4qXxA7Fc+MGY380rovvFZsZinxjsToUv1m7Iv6Ru2Acjj5yN7fuf+Xsg+sMVhg3Ogb6ncvihY3VWoYi8shdSs1QFAfXxfYMhTsMm2NxgdEqdiTDDIXtkbuJ1uti19R0SracPSstnZ38zAEAAAAAQzGAZ/7/xX1kWL44sEkZiZdog/H8mFax7ZE7dxg2rouNodj9vEzLUIQP2z1eq9iWofitMRjHPWy3+Uyj3OtluML4cb+vVypbL8ifmurD7ERqm6VXXKQ9bOdI2y6Ki/OkJ4wH7rwUxWRrhsL2wF0i++/NSUqy5aJc6+riZw0AAAAAGIoBjAepizvkve0rZH7d4nAgrltiXBfHpSfCx+3mqZ1i9ZE7S4YiHIvd9ERwTVy3fMzH7cwMRfwjd/Zm8R93fCdn264xXmHcXRbvu3Vdlhzc410UK4/cBbkJ9XN4TTzFMhSrGYrJZobCf9guWWLJUERHYjVDMa08JXPr66Th6lXpGRri5wwAAAAAGIoBjAdn2lplxeEtMr9+sTcU28fieQHLw3ZGhsJ6XZwbiGvDsdiWnjAfuRvrslh95C7oFtf6uQnvgTvvkTu1W/y7mq/k0/2FcrO7jREL48rAyLBc6nggm8+ckGnpbfJCRXGYobB2ioti8xO2objA7BUnS6LXxBHuQJy7Is59/ar5iOy7dZOfLQAAAABgKAYwXgyODknD1YPydv3iLC85kRV8DtIT6mjsD8RLjAzFF9poPDfCHYojvWKjXaxeEqvXxW+NkaH4nT8W16qt4miGwh+LP9i5Vo7euySDI8MMWhh/P7sjI7L/1g35/fZqeaO23BmKg7HY2i22dYr1DEWBck1cYHngLhiLnc9l2lD8YkVKXq2pkqrLrXKju5ufKQAAAABgKAYwntzv7ZC/7vrGG4qVi2JnJF4c2yy2ZyjCBIUrzE/M8x+2Ux+5M0Sui61XxcvyPHJn8C+Kg6E42iz+15GUdA32MWph3Lre9VCKz5+W/9nf5FwWT7M9cOeNxY+ToZisPHQXJCiSRq9YGY5/U1kuM7K2nD0jR+7e4WcJAAAAABiKAYw3o49GpfLSLvlg55f6SBwRpifCh+3iBmMjQ1GrD8ZmhiJ6XbxM6xbbh+JlenrCyFD8LvLI3ZeaYCjOfv777k1y9eFdyX0v+DWB8apveFiuPOyUsotn5c+7GmRufaVMLy+2ZijU62L9kbsSe69YeeRukpKhmF1VIQu318vG06fk5P370j00yM8QAAAAADAUAxiPeob65PtT5cE1sW5JeFlsXBfPs/aKl1geubNkKJRH7qKtYjVDYXnkLjY94Q3FteFQbM1QKK1i9aq46NxOBi78TP7Hn0dytatTzj64L2tONsvH+3bKixWljoJItzguQxFtFRekSmVqOvfXScp7Oxtl2ZGD0nDtqtzp7ZHOgQF+fgAAAACAoRjAeHb6/iVZmOsR1y3S0hP65yXKWKw/bDc/GIiX5M1QmI/cxWcnzF5xXIYifigOMhTqw3aRVrGeoVhxOCEdAz2MXfhZDca5r5c7O+Rk2z357kSzLD+8X97bWSdv1lXKK9VpZyz+dWVCpqXd9ETu69SsKVnTyktlRiYlr9dWyjvb6+Rve3bK1nOnpe7qZbnc2en8tQdHR/iZAQAAAACGYgDj3eBI7hG7A/L+jhXydr03FBuDsT8azzeviwNjZSg+N9ITtgxFvtF4mTEYx4k+bKc9cGdkKPyR2L0szv3zfyNH77YweuHn+/M+OiK9w0NOnuJWT7fsuXlNaq5ckm3nz8jKY4dlzYlm+fzQPll+5IBsOXtKUi0XpPryJTnddl86Bgacy+G+7J/P9xIAAAAAGIoB/MyMjI7I+pNJWVC/KMu/Il6k5SfCoVhvFeufw+vi+XVxj9yZF8XqWLw0yFCoQ/HcOq9RbDSLtQxFjTIU1yzTExTBhbHlkbvaMEORG4rn1v1LGq4eZQTDL87AiHsVPDAyLLnOcX/2j3OD8PAozW4AAAAAYCgG8ItwveuOLDu00RuH/bF4kbVXPN/MUNT9OxmKL6y0DIU2En8RuShW0xPmdXHYLdYvi/1H7oJusZmhUK6L36r9l6w8ViEPyU8AAAAAAACGYgC/JC0d1+Tdxs+DoVinZyj8x+18823d4vrPrY/c5XIT+udwJI7rFrsXxUaGotaeoQgfubO1i/38hD8aKw/bac3iL+Wz/YXyoK+LoRgAAAAAADAUA/hlGH00KntuHJU/7/zSS0+4tKHYuSJeZHnYbrHeK67TB2N/FA4vjMfoFj/GI3dzg/zEUsvDdtFW8e9qlmljsT8SRzIUwXXxl/LRrg1yu7ddct8bfo0AAAAAAACGYgA/eyOPRqWiZae807jEOhQvMPMTdYsjj9ypneJ5xlf1Ybv51gyFcV1cG47FTqu4NpqhmFu3zJqfeCvuYbsapVdcowzFNWa3+EtHrlO868ZJRmIAAAAAAMBQDOCXoWeoV4rP1cjChtwo/FlMfsIyGBvXxc6FsWUwnm9tFdubxbYH7vyH7cLshMkcjZdrzFZx+Midf0lsZChq3a9Vlw8xFAMAAAAAAIZiAL8MAyOD8s3RQmckDq+JP7MkKBZFWsXzlWaxvVWsDsVxY3EuPfF5ZCyeZ8tOWK6Lw4vipcZ18fLoZbHRLNbTE+Ejd7mL4kzrQekfGWIsBgAAAAAADMUAfv46B7rk6+at2lBsbxWrY/EiLT2hZSiC62J/OHaTE46YDEWYnjCaxbVKr9jPUNQaD9vVRRMU+TIU6lWxdTD2rouLzu2UvuFBhmIAAAAAAMBQDODn727vA1l0YK0sdIZiVTgULwjSE/oDd+Z1cfCwXV18hmJ+ngzFXOW62BmMa7/Q+M3isFUcPxjrw7F3XVxjH4zDh+xCG0/XMRIDAAAAAACGYgC/DH3D/fLlkR+MkTikZSjq
FuUdi4MMhT8U15ud4rEzFPZm8dJwMFbSE9F28bIYy8Ox2NIsdrMT4WA8t+4rKTq3Q3qHBhiLAQAAAAAAQzGAn7+BkSFZfbzYuShe2BC9KF4Q1ys2msV+giK8MLY1iz83huJ818Uxg7E/GtctzfPIXTRD8VZsr3iFl58IMxS5oTjdsleGR0cYioGnZPTRo+zP3KjzM9c7NCT3+/qkc2BA7vX1yq3ubuePb/X0yGD23zMwMiJdg4OS+3P43gEAAABgKAaAH0H3YK98c3SrvNOwKJqfaIj2ioMMhdIqNgfjsF0cfdguHIzzczMU3lBca+kVG4/dzQ3SE3qGwn/kLtoqNq6LPbmxeEH911J8vokBCngK7vf1Sktnh7R2dkrV5VbZdOa0rD11QhYd3C/vN+2Q93Zul7/sbpL5DfXydmOD/Pf+ffLJgf2y/vQpKb5wXhqvXXP+3I4B/j8AAAAAADAUA8B/bGh0WNadKJN3G5eMmZ6IXhbb8xP6ULw4OhQr18XmI3dheuJz/aK4NrwsfsuSoYiIfeRueWQ0DvIT3tcFDf8rTddPMDoBT0DuWnj40agcuH1TCs+dkcUH98kbdVXySnWlvFiRkinphExKlsnkVJnz1ZVwpRIyMemanEpm/70pebmqSqamUvJVc7NsPXfOuT6+3dPDzy8AAAAAhmIA+Hfk8gqpi43yjpGeWBh52M6eoFiQp1UcDsVhr3ie8chdOBAvMcbiaLM4Mhorj9zZExTGYFy3XBmOl1uvi3ND8dzaL2X/rTMMTcCPKJeQuN7VJRtPn5R/7t0lr9dkZFKyVAqcQbjU4A7EE3NfU8rnZDgUh5LyfML9Oq28XObW18vH+/ZJsqVFWjo6pHd4mJ9lAAAAAAzFADCWXOOzunWX/KFxiTMU+63ihfX+57BV/HZcr1jJUMyvWxS9Lq7Pk6F4rF7xGI/cBb3iL8ZsFkevi6P+0rROWjpuMS4BP4IH/X1y7N5dWXp4v8xrqJGXKpMyOVUayA3D7ucyYyjWB+OQPxCXRQZjX0EqJc9nv/5l9x757sRJaX34UEboGQMAAABgKAaA/BqvHpAPm5bLgvpPw4E40io2H7mzPXC32PmaG4vVVnF4UaxkKOrzZSiWxHSLoxkKW684MhLXua3iN2MzFPpQ/NGudfKg/yGjEvB/dPjObVl38rjMyKTkhfJcLqLElSxxx+GkJzcYB6OxmpwoixmLyyxDsXddrIzGubH4N5UZeSmr+vJludndzc81AAAAAIZiAIhzufOGfLz3G1lY/2kwFi/wPke7xYsig3FchmJ+zCN3YYZiiZahsD9yZ/aKjcE44A7D6lisP3K3zJKhWG595G718Qq539vJoAT8h250d0ntlVaZlUnJr50LYncgLghGYn8oLgnGYv+aWLsuToXpCUcqYc1QPJ8oUz4nnIH4eS9J8XwiHI2/bG6WnddvSM/QED/fAAAAABiKAcB0p6dN/js3FDd86vlMy1DYhmI/Q/G2lb1ZHIzElkfu5kXEPXJn5CeCi2L/kbulwWhsTVBYH7kLr4nn1n0pRed2MCIB/6FDd27JyuNHZFp5qUxxxuFiT4nCH4RLgoviyZFOsT1DoV4X25vFiWAYdodibzhOuP7Y1CQ/nDnDg3cAAAAAGIoBwDQwMijpi43yh8bFzlWxMxarGYqG+AzF285oHD8W5wbhyGDsUT+b18W2R+7ytoq1z+FFcbRVvNSaofCH4j/tXCOH75yXkdFRRiTg39A7NCRNN67J2w3V8uvKsmAgnqwMxZOTxcp1cWmYolCaxX63OLgo9obiid5YHE1PlMWPxQklQ+ENxbnR+I3aOvn7nr1yuq2Nn3MAAAAADMUAoDp4+4S8t32JNxB/aslQ2AZj02KtVfx2nX0wnh+5Ll4SuS62Zyg+99ITnlpLhiKSnsh+9rgZiqXaRbHWLK5bLh/sXC1XH95hPAL+nf+vhN4eqb1ySaaWl8jUtHpFrI7FJc5Xl9IqThoZCusjd356wr8kjn/kzh2H1c/KdbH/OZlLYmTkg6Zdsv8WD1cCAAAAYCgGgMCNrjuy4tD3YXrCH4wbzFbxp5aH7T6LGYwXGc1iS684EF4W6w/bxXWL3YE4b4Yi9rp4mYXbKy690CT9I4MMR8BjutndJZWtF2VGJiFTnEG4KBiIp6SLY9ITJco1cUmQoZikPHLnD8XR62KVeVGsXxc/7/MTFImklqGYXlEpH+3ZI3sZiwEAAAAwFAOAa/TRqBSeqZR3GpT0REMo+qhdtF3sZyii18VR89UMRd2SyCN3Zq84biy2ZiiU6+LcYGztFVseuftz0xppun6cwQh4TLd6uiXVcl5+W1vhDMThUFwUuSpWL4ujY3GplqDIDcaRoVjpFE80r4tT4Tg8yRiKtUfu/LFYyVAUpNPy3/v2y75bt/nZBwAAAMBQDAC5obj5zhn5eM83srD+E2Uo9q6L/QfuFLYMhfbIXZ3SLK5bFBmK5xvNYj1D8fmYY/HcfNfFdUvtGQplLH5TvS6uW579Z/9eugf7GIuAx/Cgv08SF8/J/MYqmeKMxEXeBbE7GPuf82cobINxqb1ZnCqzD8aRDEUij2SQnwiTFEmZkPXFocNy5sEDGX30iN8DAAAAADAUA/hl6xnqlW+ObvUuij9Rrok/DcZiNT2hjcVGr9gfiBcE43D0kbv5SoZCuy7WhuPPrRmKuepntVUcyVDYLIs8creg/kvZfu2oDJCdAMbUPzzsNInf21ErU1LbvJHY4w3GzlisDMRTtHaxMRQrj9ypV8X6YFyWN0Mx0UpPUJhjsS4l3xw7Lp0DA/weAAAAAIChGMAv29DosBy8dVL+uedrZyDWMxSfBdfFeqfYMOYjd4sj3WL/uji4JjYftgtGYfWzMRybD9tZHrnzh+G55sN22c+f7tskp9suMxABj+Hg7Rvy8b6dMtUZhreFF8WaMEOht4qj3eLJpmTImqFIleljccrsFkc9r47G6sN2CXck9v/4u5On+H0AAAAAAEMxANzreyBfHdkYDMXvNKit4pj0hKVXbMtQ6NfF0QyFOw4vDq6Jw4ft8iUovrD2itUMRfCwXa29WTyvbrlsPVPPOAQ8hovtbfKv5gMyNb1NpgSMi2LvwjgYilPFMd3ikjyP3JVGrov1XnE0QzFxjLHYuSZO+KOxcVHsdYtnVVVLyYULJCgAAAAAMBQD+GXLtYrPt1+Wf+75X+vDdguMXvHjPGz3dr3ZKl5kuTBeotEyFNpYHF4Xq9fE/mhszVDURTMU6lC85MAW6RrsZRQCxszTDEni4llnJA6G4tQ257J4qjoWWzIU5iN3k9VusZqeSI7dLI7LUEyMjMbxg/FEtVNsjMV/2b1HDt+5IyOjo/y+AAAAAIChGMAv+Kq494GsPVEif2hcpLWKF9aHFijUwXhhkJ74zNosNh+2m19ndIvr1Ovi6FhstorN6+K5+TIU5iN3WR/s/Fb23DgpvcN0SYGxnGm7Jy+U5wbgwnAsTimXxbZesZGemGK9KC4OmsWRsTiZ55E7P0GhZSgSxlAcfeAud1XsjsS26+KU46vmo9n
fF4b5fQEAAAAAQzGAX7bcVfGfm5ZH8xPGdbEzDtd/FtMtXhT7yN3blgyFNhT7n430hPu4XXhNPN8yFOe7LlYzFG/XL5f1JzNyresuYxAwhs6BfvlwV71MLy8KRuJQkTIUb9NH4lSRck1cZAzFWWl7eqIgNwZbrovNDMXkYCRWrotTCVcyEbkqdkfi8LM7Fif1bnEyJS9WVErR+fP83gAAAACAoRjAL1v3YK80Xt0vH+1aIe80fGK0io1ucexQ7IrNUMQ+chdeEb+tXhPXxWUo4rvF2lBcG14V5wbjLw5ulea7FxiCgMeQajknb9aXy9R0oSc6FqvC9IQ5GkcftZtsXBdbH7hTRmL9Ybs8j9wlw+viuAfugoE49zWR1Hy0e4+cfdDO7xEAAAAAGIoB/LJd77otm0+n5c9Ny2RhwyduhqLeyFAEreJPI93i2AxF3dgZirfrw0ftoumJ8Lo4fjD2MhSRwdjNTvx991ppuHqYAQh4rN8LHsqyw3vDkVhLT+gZCmurOJKhsA/G+nhsXBc7lAxF0s9PlFp7xWqrOByKyyJDsdksnmg0izefOcvvEwAAAAAYigGgtfO6LDmwRt5t/MwbiPM1i/3r4rETFJEMRW4krlvsmK9yRuLF1lbx/Eh6QuenJ8wMxfs7vpFtZxsYf4DHtPP6ZZmWLnToF8X5WsV6hqLAf9jOedxO7xbHXRfbH7nTh+JIhiLIT5iP3HlDcaIsT4YiGfHujp1yoaOD3y8AAAAAMBQDwJWHN+Ufu/8VDMSRDIVyXRyXn4gfi5XkRN3iSK84/OyPxIsj18X5B2Plorg2NxJ/LauPp2RgZIjhB3gM9/p65W97GuTF8m3BUDwtkp3wxmK1Waw+bOd/DjrFRXnH4rBXbGQo/PRE9uskR2mMMgszPVGmJyjMsTjhfp2YTEntlSv8fgEAAACAoRgABkeG5Mz9Fvl077faUPyO2SpW0hML6z+LDMdvK4Oxmp5YELSKbb3iPEOx8cidnqH4ItIr/nDnSll5tEza+h8y+gCPYfTRI7nQ3iZ/2FHljcRblYviaIbC/sidPhSrY3H4yF3+obhgjAxF/Fhc6l4Tp8zLYlPSy1B46QllMJ6cSslHe/bInZ5eft8AAAAAwFAMAA8HuuXcg1b58vDG6IN2aq/Ycl2sD8a29ERMhsJ/2K7OeOSu3p6hmJenWZwbidefrJAb3fcYe4B/Q+G5kzKtvFBLT0wzx+JIt9jvFSvXxeojd2kvQ5EqDpIU1gyFNT1hJCg0ZUavuDRPrzihZSjCVrE6HrteqanhUTsAAAAADMUA4Osb7pfWjmuy9kRJ0Cp+RxuK7RmKhcHDdt5Q3BDTKw4es1ukPWwX9oqN6+LII3efx4zE30h5yy651nWXoQf4N+Qesftk/w6ZnhuKnbF4qzsYK8NxmKGwjMVqszjvdbE/GtszFJONx+20DEXKzVCo18X6lbHZKjaH4+iFsXZdnPViZaVsO39e+oeH+T0EAAAAAEMxAOQMjQ7Jnd42SVyoc0biSIbCe9jOvy72H7kzvR17XWxPT2iDsZGfUDMU6iN37zQuz/59LZN9N0/K3V6uAYF/16XOdnmjJukNxFud9MQ0y0XxNONhu0i7OOVfFkf5Q3GB8uCdmaAoCLIT/iN3xmDsie0WKw/bTUolrBmK541H7sxu8X/v2y99DMUAAAAAGIoBIJTrlvYM9UrT9UPybuMi+cP2xdEcRYN7SbzgMR+584fiBXkGYz9DkbskDj6rI3Hw+XP5YMf/yorDhXL6fqsM8nAd8B/Zeva4zMwUy/TyrcFYPC1mLNaG4ki3uCgQPHCX1h+2s7eK/bHYaBcn9W6xmp/wr4ttvWL7dXHCelUcpCcS7lj87o4dcreXTjEAAAAAhmIAiBgZHZHzD1rl6+bN8j97V4bZiXrLdbHWLNYzFHGP3IUXxYuM6+KYVnHWOw1L5cOmr6Xu8gFp7bjJqAP8h/qGhmT1iUPOMJwbiqeruQktPWEbjAtlipGhcPvE6mdFMBa7g/DjZigK8jSL/ZF4cnBNXJqnWRyXoUgGppaXS/Nd8jUAAAAAGIoBINaDvk7ZfvWArDle7AzEuStjfzAOMxSW6+JgMM6XoQg/6+mJUG44XlD/hfx996rs30NCLnXckN7hfgYd4P/gTm+3/HFntbxQvlW7KJ4etIq3WsfiSIbCaxWrj9yZnWL3YTv1s8eSoXCH4uJoeiI3EDufSyO94slaekLPUNiG4twFsf45KQXptDRcvcrvKwAAAAAYigEgn6HRYbnVfU92XT8iXzdvkc/2rw4vixs+81rFn8YmKN4OhmM/QREmJxZYesX+59wF8af712X/M4vl3IMrcq+vgyEH+DH+B6D+Xnl3R8a7JjbTE3qGIhyJw9F42li94rQ+GD9+hkLvFruXxaXRh+2Sloft1JHY8rCdm6GwP3D3Qnm5bDt3TgZHRvg9BgAAAABDMQA8jnt97XKj+44kL9TLt0cL5YOdS+X9HUuDBEU4HNtaxZ8ZrWK9Wbwgdz3csCT711shXzdvk61nq+XKw1vSMdDFeAP8iI7euyXzGtLBUGwfi/1hOK5bvC1GmJ2Ymi6KjMUFecbiyZELYzU9oXz2HraL9opdE70MRTASp/xROK5bnJSvjx3jQTsAAAAADMUA8O8YfTTqjCk3uu7IxfYrkrzQID+cLpd/7P5G/rbrX/Le9s8jGYqF9YuC6+KFDYsduZH4ve1L5a+7v87+eV/LV4e3SubSbtlz45jc7+uQ3CUz32/gyQzFMzNFMr18izsUpz3qYBy0irfm7RVPNdITWoZC7RSn86Un9LE4kqAIHrkrjfSKoxkKtVVs6xbbH7j74vAhfr8B8P/Zu++3Ku987fv3P/jse+5JZnYSo8aZibEmFnqRIthF0dh7x16ogrAWvUkTBFQQbID0XhZr8XnW1b/tWpiZaMhw/vA6WGbP3pOQyBzHuT/z/gIAAABgKAYA+E9oD99p3eCu0Q/0YqiLHrwqovsvC+lI3TX6tf4G7a0+R0llJ+jw0yuUHvxje6vP0+WWR/q/JrPDSw0f2+n9xEca1MZhP8ZhgM/6+3UhQKXvu+gfeXeNofjRdfra5p6g4B+5s66HQ2cojJGY/Sy0ipmxWGwW/4W9LH4gjMb/RoYi1AN3/19QfHkZjcyifw4AAAAAABiKAQB+VzP+ORqdnaA5v4/6pgZpYHqYxucmaSz4xzQz/lmaD8xTYGEBwwzAF5bV2UYr9IviG/ZYHLpX7GQovvqtGQrVUGyOxVaGwu26WNUs5h65e2AMxsqhOOQjd3KvOLq0lD5MTODnEQAAAAAAYCgGAACA/34+v58K33TQ/2aZI7E9FC+WoZAfuPvqEwfjvzLdYidD4fSK//rwdshH7v7i2i3m0xOqXjF7UexcGMtDcVRpCY3OzWEoBgAAAAAADMUAAACwDIbigJ/yul7Qypybzkj86DqXoVBeF7PpicwM+7JYHI7/+jBDGImtdvEtfjB+xHeLF3vk7i92hkI9Fqu6xWKG4n9ce8X3aKvXQ/1TUxiKAQAAAAAAQzEAAAAsD5
zv/xCIc5pJw9azZdvXp1TE3WaH+n5Pcwv497nd/nvWBNih8D4DjkZnVLtMJiXUOXAYJi4MHwcOgNunvgKT1sK6SbzV9TUd1uutxwhK48Pk41LRepqfs6tfSE7nYPUwAnJABjuhEzSAOBPrrXWk2VT3Io8/5JOlq1gY4F+aAymTYXzaS9Fcvowyurg39eRSev7zAoa8gInp/l1DXQQf1DfTgP4wxugHATRLeXq66ujn7+s59rXdYYbS+xie4WxWRRFfgRLo51sJJu3Uq0LtIm2g2lyXKzxI8bE37rDSYbutuLfg4aZA9sNJbk+32M8TBane8n0RpgpzvgVq00amho8PV34RsAfj1/He/pfnxGj6Y9vHPHzmfy4I4F3k/RGCStU9Hzxz/80dfXYGbCTF9X2ele4TjZVwwCBMXjxmCgh9r6aqms/gPKuJtIR6/8ho5WvhB8nEIfXf41Hbr8Kzp+5fd07MrLdP7OUsp7sJlae+5RR389TkoAnpGu/jZq6XlM+XVf0ue30mlvxRJaX/AOpRVNo42FU2ldcJtZX/Aurfv/2XvTILmqK9/3xov7+cWN6HDc6Bf3w4v74t5wOBwdDgfhcLf7EhgZEBgwYCNh6G5AEhJmtjHzZAZjNDIISQwGNEsgRhsQCAkkRGsWqkETmsfSPJTGKlWV1tPKqsw6mZWZZ5+Te5/cJ/P34RcqWyLr5Dn77L32f6/1X4vO/bxomDyXQX++VcYsvfvcv7tD3l03Qb7c9q7sPr5FTnec4F1MkZDxxONPWC0/VJHYpvegeqKpN1o17o+LTcvFF12cuM9yNXDR/KxUw0RTbDevSmKT5gO2N7b1Uprpa0NHcOeHqhnkaRlL1bA30UZT2nTOZpWJa3E1y9QpU53d70cfedSq1UQ1xG4b1TUu1ugkLT6StrtJwsLJ5bhXnnryqURsY/Q5Dxs6zPm40ox41lbgJjjJamzPZAx/sflBmdxwiUxadcm5P/vLpB6CP6tgrExu+KW8v3aQLN31iuxqXcbLCWDiOdl5Sg6eapFPN02USU1Py/NL75Dhi26R4Yt7yPw8tJdFPSzuFopVMM7+nOX5Zb+XEYtvk/nbP5A1B3gX04LNMjjdcN867FZr2UJJlpQmtWmpl27ItpvG2cjeVXESn93qbmwV9fxW7+9afwdsNy2yYb8C+dj2oHc1v2ujtiR9NjXT2sX9ttmQT++HZicnMU42bNiQ6ZOQZpFYHPlD2zzEsr1Gl0LFcpce3KXQJAFNFkhL4oHt6sBiMZ0tP2ITtM+J7QSGpOZOQCiuexbteF7eXfNvGZE4IxQ3XFIgDvcVirNMa7paPt/0oKw/8DEvKEC5QOXkTmnc97W82fiEjFl6m4xYfEseGZF4UUAszonEAdF4cY9YHBCKn1t8a8/Pt8rrq/4k76wbK4dO75MzXe28k3VmEZB2X2KXm5Z66Yasmba2712lZd16731t4OPtemF5Y1tPpZkuGmFWar8CbqtGXHiWJ1H6nYQ1jG3RKal3QcvWVcx1cZ/1EC5JkVhRYe6eu++x/l1s2GrZXqOLZRFPmjipalZmerCsB8w2x48e5rq6XrV1cfUsqjH2XX+nerGWA4TixDOJl+x8UaY1Xi5TGi/JCcWTe7KIM2Lxqkt6ReJVvYLxxFW9ovHEVZfK37+7S9bt/5t0ne3kRQUIvmddHbLnxFaZvma4vLLyPhmxeEgfkbiboYHs4p6fe8Ti5xYFhOOASJyfXXxrhuGLfyfjVz4s3x1qkOPtR3kffc0ub2/PZEH5JhSrfUWSmQZJbVrqpRuyC7uCSsqhXYzzesgOt72xrafSTNu2Bi6tDVj/7DwfbeCpjTx9zcJNyubHtUCTZLWRNpdz0QSumrZa8+bOc5KhXokVk+tYVDNJXVmqVOsA3aXXv2Zca3NFF8+immPf5ffCGgoQil1sRPZOkckNFwcE4uJicV/riW7ROCsWq1CszNn0sOxsXSZdZzt4WQFEG0O2ys5j38lr3z4oo5fc2iMSD8kTi4cvGtJXMC5GQCx+LkNBZnFOLO7+c+SSO2TJ7s/l4Kk9vI91lP2Z1iAy6N9MIyp/7Aoq7STtwgagHrLDbXdod93wyydsN0qjrNX/Qy3bc7zr0u+ksv5te9aPHDHSupBdKvvWZs+FLGpjoWXw1RrzLS0t1itFKhUuXazRWVQY9CH2st0cWddnV9fqqvFhtce+Mmb0GG99ugGhGIKleUfmy9vN1/RkEhcXiycFbCgml7GfCLJw+yjp6DrNywp1z7H2w7L2wFJ5q/EJGbl4SI6sUDwyIwqXzy4ekWloFxSMh/XYT/TaUDxXwF8C2cX688Kdf6fRnads2bLFmf9eVDRzp9pZH5nDlXPBngZ9Nr+bi2yzerErqNSb1UVmbD1kh0+eNDk1jbJ8QzOnbdvxqPc3a5a/B6U2S49VCFUPz2qswza9T21bN6hwrgJ6WrOJfbDVcnEQXmnFw7p16+RH//SjmhQmM3uxY8cyBzC+HJ5X4yBBx/7MGTOr/iyWLl3qxKe70j4agFAMAY6c3iKfbbxHpjVdLlMaLukmYD0xpbF/H7/i4jYUPQJx4OepTVdJ497pvLBQ15zqOC4r98yVSU1/kpGLB3eLxEt6xeKRS24pKhR3i8MBz+JcNvEtAb/iYX2a3JWyociyYPuHvJM+jpOEO6r7vIHKooKuCrtkG/hRcl9pKbTtrHmXmzSf0I2/zfumQoAKAnjfJlPW2tXVlTlg0fksDVTDf9n2IZKtAyS9LrVYqMZabDMz2qbYmmQDO11vnnj8Cev39q477/LCZ9zFIWAlvtEuqldUJG5ubvZivbHtWa/ri64zLq71/ffedzKv+DL2tbeE9pjwyXoFEIohGLyePSNbjnwp76/9N5mSySS+OCcWh9lQTOqTWdwjGBfYUMzd/Li0tu3kpYU69f7ukLUHFsu01c/2iMSDA9nEgwPZxeUyiru9ikcUE4pz1hNBillQ9DJ66V2ycAcNJ32kGj6IvgaRLkpl68XT1tVYqtSuwHZmnstNWi0fIOlGWTfMtf4OuJg/4ogwvjYr9akRkO17ZMNaxZXlQdKZ/7YbpyXZzNFFpVWS3sphaHWCbZ/iSp6P7b4GSR4qVON+u/ARV7TB3KCbBzk5fPIhs9tVRr0eNvrybgNCcU0IxQu3PStTGi/OCMVZJucREIkbsz7Fl5T2LM5rbNc/Ixav3U8GI9Qn6kn84XfjZNSSwb1C8ZJ864mReV7FWbF4SBGhON+zeETQqziTQRz0LQ4IxYtvLfj5Vnmr8Vlp2r+IhpOesXr16qplL/niS+y6DLIeGlG5aEhTqYDkwkbE1SbNJ7Bf8cv3PY7/p4vsfldUqxGQ7ZJwG+XHmpVc7edmQ/C2+T2SFlnf+OsbTkrTfVk3fDnMUlz0NRj+3HBR25NatXHSz0tLRZiizSxr2VbLpl0PIBTXPftONMrf1g/pFoqzYnHm596M4sLs4kkF2cWTimYX99pQTG68XL7eNlxOdxzhxYW64mjbfpm//e2MOKxCcU4szvwctJ4YUuBb3CMULxrSx4ZieO7nYk3
usjYUhdYTxX2LP/juVTl0ai/vpU9e1pY3y1F9iRfM98vvVbOpfPLvSwu2yysVLRHUUkFfMgZdbtJ8woX9Sj0IxS4OSzQTLU6Gp4uGeq6oljWP7eyySg+RWltb5aYbb6r686hU8Lb9PZJqYKeoXcuvr/m1dRsEzVJmfnff1+CC8y+QnTv9qSi2Pce49Ku3ndmdbSa4detWr9b92Z/Otvod9aBDDzzYTwJCcYWcPdsl+0+ukbebr5apwYzivJ8vCVhRFLOgyDa5K2JD0SMUa0bxzNUD5dCpTby4UFesO7hERi0ZJCMXDyrIKC6eXVzahmJIXmZxjkXBjOL87OKsUFxKMM6KxXO3vsN76RmvvvJqVTakU6dM9W4suBCKbWRo1WMmpfpEViIQ2M6QqZemYi6EhCRLx2vpsERFFG0wVO3yWp+FSV/WvkoFby2Xt20JUA2rJJvfI8kGdoqLZlc+ZRO7mt/jNp217RPum2hn27PelYWTC299ZczoMd6t+bYF8TgVP4BQDEVtJzpl/YEPZGbzVT1C8UVFhOKLc43tCpvcFbOh6G1s103Wq/iDdbfI4dPbeHmhrrKJ32h8OJdJ3C0UD8plF/f6FQ/Ot6FYErSkKO9XPCLrV7y4UCju9SsOisZB64ks41c+ImsPruDd9IhqlClrQK+eaPUgeNa6UGzbjzKLNrmp5LpsZ1XWi8+uC8GzHoRiF42A4mSputr0u6KajYBsNtKqJGt+z549GbHNh+ehFUZaaRSranTfPrnqV1el1mtWhS3b3sRq78VBYDJrtG+inTa+1Qa4PrybSe8BfBz7LqxOKo1TAaEYghmPB97PiMQZobjxojwLisl9PIuzIvHFfbKKC72KC20o3l3zHwjFUDd0dJ2Rxbv+Lq+s/EMmozgoFvcypDejeEl+Y7u+fsWlGNorGC8OZBYHmtz1isSlm9zN2zaLd9OnQ4aEhYWks4Si4GNDNt9x0fzHhm/pk3960hsBJU345GHJYUn0TahtccIl1W4EZDOjUe+53vs416E+njasnGwIgJXMva+/9rq1sTFs6LBED5NdxEE+zns+CcW212jfRDubB1GK3q+0VBXqeqjrok/Pw7bVSSVzPiAUQ7EA9tCnMqP58r5CcZCsaNxYzH6irw1FfpO7bq/iWWv+TfYcpwsl1InPbPsh+duGcTIqYzkxKE8sztpQZLOKc4JxkSZ3xQXjfN/i/OzioUWyi4dlPIuL21B0C8UvLb9fthxZw/vpEbYzadLkS+xaKNYgvJbHjovmP5XaTrjw3n7m6WfqYs5yIRTXusjuoiloHNsJRRuJpSWbuFIfcp+Eg7iit633TS0ObPhvajNXbeoa9XuoF6l6ktqKE5YvXy5pPTTIolUGWAsls0bHHbdpsjnQDGzb16jPTZ+fbxVhaXjH9WBJD5jYQwJCsSV2tS6R99b+tlskbuih0Iaix34iS1YcLhSOJxXJLM7aUHy84U452raTlxfqgrUHF8uYpbd0C8RBsXhx9n/32k7k21AUNLZbUuhbXMSGItfYrkc4LuFXHCTfhmLYuc++Q5a3zOP99MybNwl/RN86ICchFNdyMzsXzX90HM6bW9n84MI+oZol8mkXimu9mZ2Lg7a4hyXV8pyvhh9upainsHoL2/o+URsPdnR0yKOPPGqtSseWEBKngaKNrOhq+vraXvt1DtW5tB6E4jgxju01Oq5PclpsDtQaQi0ibF+ni6a/NirC0mAtV2kDU0AohsJN5enN8sG6GzJC8dQeejOJL8qznuhubFcuu7h/QTbxJTnB+MN1Q+XEmf28vFDznDhzVD7b/Ka8sHSYjF4yqIfBfTKLg9nF+X7Fg2PYUAwNNLnrzSp+LtDsrpwNxYjFt8mk5r9Ia/sh3lFP0Kwuze6qR19i1x7FtSwUz/l8jvUDBhWeK93w2c6qrHaJfNo9imtZKFaBToU6m/dLRQFtrBX1WlR0VfE1LUKxD9UWNsvfo9oMadasZs9WerA2c8ZMsSn6RP0eNg+XqmFN5eK98bWKwoUwGMcbWBvD2owdqn3o5NrmwJX4avs5+PgsXGV4T540mT0sIBRbPb0/c0DmbPqDTG/qnxOKpxaxnshmF09uCBJsbFdOMO4vK1velLaOVl5gqHk6z3bIzDXPyuglN8uoxTfnZxMXEYvzs4uz2cN9m9yFC8a9AnHw58Ls4uJexcPk1W8fk70nyfr3Zhx1dmYy2FwJAuphu2HDBu+ft2ZS2f7uterP2traKjfdeJP1+6VWFr557umGTzd+9TAXuMg48zW7zgY2fVmzDLp5UKxDNT1g8aUpmgl6oFPt56ebfZuZsEmXfOscrHOxzXL+KN/DVlZ0oeiddGWM7fcmyj1Me8VIHEsEm++djxZftm0OXMWRLqrofBRQbWd461ylIjv7R0Aotr1IHfxQpjVdkskg7m5sd1FPhnGvWBz8ubvJ3SUZ+gjGARuKbFO7t1cPlA0HZ/PyQn34fh9eKRNW3p3JJB6lYvGSm/taT5xjZIYesTjjT9zNqJ7mdjnyrCdKi8XD82woignG+dYTwwuE4heX3Svf7l0gnV0dvKueYLvxRlp8iV0LxbXqz+oim1gPFLQ5nm/ZYbrB0I0GQjEZ2a6ziSvxNnXhs+oKXxoB2aw+iFKKbGP+LPTy1d+t15DkfGcjK7qY6J128dRXqyLbMU4cwczWOPXt0MlldVqcrG0TtOKt2uMhjRneWnWl1VfsHQGh2DJ7jq+SD9f9R69I3HBRzopiSsns4ktyYnGpJndZsXju5kfl7NkuXl6oC1bvX5jJJu61nRjUIxp3k7GhyPMt7hGKs2Jxn8Z2gwNicRmheNEtBU3uCrOJs43thmZE4r6N7YbJ55un857WeEmiMnLEyNT4eLkQWnz1a6s0A2vggIHWx4oNb0oX2WGuNmk+4qIRoK/NbSrFpi9rlqt+dZXs27dPfBAnXOJLIyBtgKWNsJLM+tPnq8/ZxXxpo5rC9NnYbIRVjQZ2rsRTV36yvmXQxxXMdGzpGKu1Q6cg+m76vn66aGTnq4BqO7av9Qa9gFBcVZr2TglYT1zUk10caHDXcHE+ARuKwiZ3QduJD9beLHuPN0nn2XZeXqh5OrrOyNwtk+WlZbfmrCcKheJ8ejOKC20o8skXiUeW9CrOzy7OE4oDXsXPFbGhGL74Vvngu1flTGcb76onuAgahw0d5r0vsevMolrMpnRRrvjDH/xQVq9eLT4JP7UsciY5D/hYGuzjXFGp9YptccIlvjQCsplpZurFbcOupJSXr43KINPDTZtVJc88/YyojUUtrGe+Wu24yOSNI5ipqKvibq0dOuVsNi03ydR4RuOaNFQP+dqPwPYhqnrbs28EhGJHHD29VRbtGJ4nFk/NWU4EbCgCQnHfxnb5NhTTm66WNfvfpYkd1A1nutrk042v9mQU99KbPVzoWZwViAcV9SzuKxQPLuFXPKRPY7sRxbyKi/oVD80Jxa9++6gcaTvA++pRlqjtTMy0NXtwETj7XIbqU7m9+lzaEApsW6i42qT5jO1yVJ8b3MQ6pLXoy2qrkZdtccI1cX
xNfT8YMcmm27p1q5z/r+c78/K1cVBmcrhp06O+Gg3sXB6w+CqWuWhaHEcws91s1pdDpyy2G8JqXF5pg98krtPnnhy233Ff1i9AKK5Zdhz9Rr7c8pBMb7w0L7N4Sp5oXNyGIpddHBCKF+98SQ6cXMeLC3VDa9sB+WjD2D5C8egekTiXXVy0yV2vSNxHMM7zKi5hQbFoSLgNRRGhuNeGYqi8vuqJc+/sbt5ZT7CdiZnGZg+uyu41W6pWBDL9Li6yiW1lXdvubO1qk+YztpsBKipQqFBRC/fny3lfZsrLbd8jzTT1zTrIBb6V5ts6GAkTWFXMsiFYlPPytZUhHeb5+tGHH1nJJq5WA7sgKnbWg1Cs75zteSvOIbjt9cW3hASNe232b3B1yOqiKsZHodj2IarP1jKAUFxTbD0yT2ZvuFNmNl+Ra2qXbXKXtaHIyy7OeBVnReNez+JluybIvhNNvLRQV5zuOCHvrH1OxvQRim/O8y0eVWhHkc0uDjS5y2NJX6/i/CZ3t5T0Lc5RaEOxuFcwHt7T3G5S07PS1nGS99YTbGdiprHZg4vSTB9LI30TyJ54/AkrGUG2O1vXWiZsteaCWtpc7dmzx3rlhaLiXktLi/gkArnCN992m9YD6ndbzidTD8VcevnaEkX0nrj2WK5mAzuXVju+CsW2D1HjWGzYbjbr47pi2wfalW2Ti+bNvmV3uzhErcW+I4BQ7C07WxfJV1sezTS4mxKwochmF09pLOFbrJ7E626U5n0z5Hh7Cy8s1B1tnafk/fVjZPSSm3ooLxSPNvUrXlLcrzg/u7iYX/HQnuziobmf80TiHN0+xZpRfOQ01hO1uolIa7MHF9mUteBT7Mpywqafo+3O1rXorWuCi6aOtXAvXVlOhAlzJmgpbFpsJ1TIVEHTl+dqUzAp9Rzb2trkvj/el4iXr40MWc2ydumxXO0GdvUmFGtVzK+v+bXV7zno5kGRe1DYtr/wTbRzkWwQlt3vk1Dso3ev7UNUX+01AKG4hj2Lt8nO1sXyzfbn5G/rb5IZzZfLtKb+mUziqY2X5ITjqY2Xyszmq2Xm6qtlZctrsv3o17yoULecaD8qH28cJ89nROCbZczSYkLxzSWsJwaV9CoutKEYUaTBXVAsHr5oSF/B2MCv+L314/Ao9iU73UEmZlqbPdj2z6sFkUw3g7bHh41ye9cbgjTap/gquNdCp/Dp06ZbLSm2lVXpqhLCFWq/4NNztVmCrQeurqox1NtYPY6TyGjU+V7jAhceyz40sKs3oXje3HnW5644jTdtH0L6duiklWNaQWbr+2nTP23+lxah2EcR1fYhaqk5HoCb4PIU7mx3aWfLsZWy5fA8+Xrb0/LV1sfk/bXXy6cb7pB5mx+W+Vv/JBsOfix7jn8rHV2neVGhrunoOiOfb35Tnl86qIz1hIrEN/e1nggSsKHI9y4eUtR+YkQ57+KseFyYXZxjWI/9RLdQfKazjfe4RoWhtDZws92RO4tm86TR61Y38sOfG+5EMNLyZS1j9nVDkEb7FF8PjrJlwkuXLk3nAdL8BZkMSBf3xFXGGJhnW9qyEyl2QKqfP3DAwMSyzm0cdhbzZrflsRxF9E7iEFQzY2tZKNZs9nvuvsd6X4HVq1dH/o4aF9byoZPt+NGlbVm9CMW2PcjVmot1ExCKq0jX2Q7p6GqTU2cOyeFTm+R0x5GMMNzW2crLCRBg4Y5ZMqaI9UR+dvGg4jYUgezijEi8uNuKIt+3eEjxJndLbiniW5yfXZzX2C5gQ6HZxKMW3yYfb3xLznQhFNdiqXmarRZsZ4QEs1M1q4csyt77MefzOeLzhiDtGbA+WdHY9qNOkrVr18q//PRfnNwPtbKodlYlFTX2DkaKCSU2PJCjZJ3baExb7JDMhseyLasVnzOKfTtg1MM52/7lKjyrAB31WmwdNPiakGC7Is2l56/Go7bXM9+a/9puUK3zqs6vrJuAUAwAnh+odMqKPbPlxWW3ZMTiMSW8ikeVyy4OsaHIJ9+GIphRPLKoZ3HAhqJIk7tlLXOk62wXC64H2M7y0OxkzVJO6/3QklgXolAcT79azKJ0IZDZ3hCk2T7FVraRiwOCuJlo1WLv3r1y9VVXu2nqdmE/Ue9v1qDqY0vAKhRKbHi76xxcrkmeiwqhwsNem/7c1W5g51oo9umg3MX3U95/7/3I389Wo0WfExJs97hQGxmXa7ztceHbIYlei15TrQrhgFAMAFCSAyd3yJsND+SE4r4+xfmCcT7ZDOPBvRYUBfTNJh6cE4tHFKW4WBxExeLxKx+QNQeWsNjW2CY5zN8wLWhpmQthyEUWrSsWLVpkzbczibJj2xsCRa0s6nVOsN0pPI1ZtCq42T58CM4FH334EWtgjflYFgolL77wYuJevrbEwaAlijads3FoGFX0TquQ6kt5usYbtg/84tpo2V5TfEtIaG9vl3v/cK9VWyLtu5Amodi3vg66xtoc//p89TmzZgJCMQB4T2vbAZna/ERAJO4VjMcs6WFpseziQX2b3PWIxVnriTyxeEmBZ3GBDUW3IFzY5O6Wgp97m9pNavqznOlisfUB21ketdDswYXoGNxk+Z5t7VIkdiWW6+bE5obA9SbNd2xven0Wi5IWibGc8A9b80ewNNlGk7y4h2pPP/W0tWxGm2Kqb+Pe1TznQwy0Z88ea97blTaxU2w3m/UtIcF2rw8V1VVcT5NQ7FPjZhvVHLXUlBoQigGgzujs6pAluz6Sl5ffmrOe6M4uvrmoDUWhZ/HootYTvY3tSllQ9OWWkhnFw4P0+BMv2P6+nOo4zoJbo5mDaW/24FIkU7Q5nK8i0fyv5ssPvv+D1H13FTWs2gI43qTVoyVNELVzUFsHH7+3bjBtNB9L43evV2w2oVIBxpZVQ1wvXxu+yCo228xK9aWBnQtR3Td/ex1/Liy0KrHLsd1s1reEBNu9PjSBQxM5XF2vDS9zXy3W9HDrrjvvsv7daDwLCMUAkCo2Hf5WXlo2JM+nuFcovqmsDUVpsTjfeqKvaNzrUZznVZyXXVzcfuL5pXfK6v2LWGxrLJOq1po9qAefK6FIs2q+nPelV/eoq6tLtHHd9/7he86+t6tsam32ok1fbF6r601aGtiyZYuzJm5xSuqTQP2TVdBy9Z3TkE1dj2gpva3sSxXEVFCoNHuyEi9fG9mCmkWsh2W2Dk2mTpnq5bi3Iar75sXuqgnt66+9Hvs72W4261tCgu2DVbWES8vhmG99CFyMf71Xes9YLwGhGABSw4n2IzJ702vy/LJBPbYTpa0nijW5C/oV52woivgVd1tPdPsV534uEIxHGngWf7B+vJzuOMFi6wm2MzFrpdmDS49WRQW4tWvXenGfNPNJs99cbCyT+L5Hjx6VAdcOSNUmLRUVKw4E+EIbEt3Q+XJQ8u6sd51m0/v0fcFdFclzf3kuI/JW80DBhu2FruUvPP9CzTWwK0RFfRfv+5jRY6ryfV01oa0km9h2s1kfExJs9/qIW01gysGDB+XS/pc6Gft6LzR+qFaSh4vxrzGmxpqsl4BQDACpYtXeufLis
ltkzJIbu4XiQGZxllI2FHlCcdC3OJdVPKggu3hIkSZ3pYTifMH41W8flqZ9C+VMVxuLbY0KQbXS7EHvzROPP+FMNPJFLNbN1pVXXOn0e2rQrptXV9/BhihSiGYHMUfIf5k3d57TAwQfxNMkDkp8t5wB+S9aym7jOf/jf//HisdSpV6+NkQg/Q76XWo9i95VCb761ba0tCT6vTWecFEFomNh5oyZsb+L7b4PviUkuOj14fqdcdXIUdF4TOOypJ+DVuq5EIkV3StVS/wGhGIAgPhZxWeOyKebXsnZT5SyoSidXTyoOCWa3OUJxaWa3BVkEo9ecqtMW/2cHDzVwkLrCS4yMWup2YOtTu9hYnE1mqa1tbXJW2++5dRqIikhUEtQbV6z+gyq3yBzRHdJvku/3uwY0QZJ1RBR9R13aTURbLxUbd9GSLZ0vJpevtrkS8ecD9/H98aNNm1HbFo1xDkwdTWXVZoRbtvizLeEBBcVaEkcrtjOgg5y3x/vE40zk3oGsz+d7TReV0sh1klAKAaAVNK8f4G82fBAj/XETb02FDmv4tJN7kYZNbkbXLLJ3YgCG4qcZ3FAKB65eKisO7iMhdYjXHiU1VKzBw1y77n7HucbaQ1uZ70zS7T8PYnvtXLlSudZxElmUdrKBKw1+xSbpZxJjJWHH3o4sQZQ6pX90IMPOc8ipnldetADu0p9hX3y8nUpAqW9gV1SFjuaWZlE1ZCOXVd+8hqf6IFaJdenCQS1nJDgYu5IQih2eThWaRa6KRpf6kGzy7Vcn201EjoAoRgAwBrzt02X59WfuEAoHrOk17e4sMndqKBgHLSeWFymyV0PpWwoillPLNzxoZyVLhbaGvbmq8VmDzaaEply/333O2n2FixLrdQ707csSheZc7Vin2KLffv2yVW/uiqRMdP/kv6iBxkufRlHjhjpPJPeRy9ySLY8Pg6avW/rkEoz4GpF9E6zYObyoEgPl/WQ2WUmpc6XlZTc2/T/zgqQmqHs0/hx0RAxCfsrrZzSCqq0rn96sKwHzK7nsWpZaQBCMQCAPc+pM8dk3tZJ3V7FBRnF2SZ3WVF4TIFXcTc3F3gWFyHPr7hHLA6Ss6G4JScUf7zxdTnRThMA37Cd5VGLzR6SyioOZu9ohqytxj8q1GpJXlIZxEmX2quwrl6Q2KfURlZxVggYPGiwbN682cpz0IyjpqYm+f09v09MIEYkxoopTtaazYoc2+X+1RS9k7BtsO1z73o9TKIqwsYztG3toYc5eqhT6xnp2iPDtSduEnOeq3VQD5T1YDmpuZmMYkAoBoBU03W2U3Yd+04+2TiuT2O7QsYUaWw3uogFxWgVg4tkF/exociJxIPzmtvNXDNS9hzfKp1naeLjE7azPGq52UMSXsWFqKB15x13yvyv5kcS31UU27lzZyYbRbuMJymMZRk2dJgcOnQokXHgIiPmk48/Ec08TTu6Qbdl+5GEV3Ex9IBDPa7VAzKKNYter5buPv7Y4/KD7/8g8euulvc4+LUmVtPL14W1VLVEb+dJFg4bewXFYhvroo4R9eX/6U9+mgqBz3azQI1rkrIoqqbYGjeLNWoM/szTzySSkWvLSiNJ26ggep9oRgsIxQCQejYdXplpbvfS8lvybSgy9hOlhOJu64lej+JSTe4KbChyQnG+b/GoxbfIlOZnZN+J7dLWeYrF1TNcNHCZPGlyTT5nDbyr7feoYtc1V18jTz/1dMb3N8izf35Wrv/t9fKzf/5Z1UuNkxSJXZcMpx3bXstJ2rCUOjzpd2E/eeD+B/q8A4oKMfr31TgcQSSmyqbSMbNhwwbxfY2vlQZ21aqaUHF37hdzY/Uj0Ps5b+48ubT/pYlUddhqQmu72eyTf3rSq3Hl0rJGRXETOzI95Fq4cGHGWmzRokWR7o+OqSREV/0dKvDGtVdTC6zRo0YnnrQRvH61h2tubq65yklAKAaAOmN76xqZu3WSvLLyjt4Gd0HP4iJCcdC3eFQRz+LROYG4tG+xisQvL79bpq5+Vo607ZeOLrw+fcR2loePvnE22bNnT9U23Wnh7rvuTjzTx4eGTb5i22tZhYokso/SjDbvWr16NWteSrEtalXTy9eFf3u1RO8kaGlpsW5jVK5SQi2hwuwodM5ds2aNjBg+wmq8lmQTWtvNZtV726dx47oJpj53bdamlWLZ9Vwz4LOVY7ffdnueeBo1c/fAgQNy2aWXJXrgqwdJagcVNsb0sEsrvG64/oaq2ugUonEn6yUgFANAusWt45tkecsnMqnp4QK/4m6P4myTu9FFbCjysouLNrnrEYkX52cXv/btA/LRhgly4OQuFtI62hD75hvngi/nfVnVjEqfeerJp0Q3L0k+j5MnT8rQW4Zy/xP0WubApHwGd63PgbWO6+ZOSXv5VuMg7fXXXk/tOzBm9JjE75dWAg26eVCf6ogLzr+gKuKYTT9l24cVPnrFVutwqRRxLB6qMe6zCSY6znWMFFbInffj87xd65NoNAgIxQAAzmltOyibDn8r768bKc8vvekcQfuJrHhcKrs4a0PR+2evWNztXTwyw2B5afkd8saqR2T1/m/k4KndLKKeY3sD6ZtvnAs0+0EDWUSx/ED/5bEvV6XMWH1r+/28H8+hBK48QhfMX1C18k9fMS0RBr9Jek5x7eWbtDVPmhrYFWPLli2ZjOh6ncdsW0fZbjar76a+oz6NmWlTp3kVj8Wp7NMqmB/+4Ies5QboQaIeKLJeAkIxANQE6hF8tG2/LNzxjvx11R9l7PJheWJxb5O78kLx6CJexWOW3iKTGp+QzzdPygjEWE34j4tMTN9845wdvLS2ZnzgCJi7fZM/m/1Z1Z67boh8Kkf0CS1XVXsZV/deswa5z90bc7XjSDqbHtygh50q+teKl6/rsvhC0VurbtI+Bl584cW6nMuuvupq2bt3r9XnZztDX+NWjV99Gi/am8OXZ1hJA7wnHn+ipsazHmbr/bD9uXrwwaEwIBQDQE2i2cVfbZsqU5ofl1dW3iXPLx3U41dczIZiUK7JXVYgHr2426N47PI7ZNa60TJny2TZf3KnHG8/wsJZx1lTvvnGuUQ7gddz1pHS/5L+ToXItG3Qar2RXSFamlwN/1PfNqKz3pkVqzEV+IkKJo88/EjNePm6bLRVyH1/vE/a2tpS/y7Uo72OC5HYRUa7j96w2mA47UJxVtSvlaxiPcD96+t/ldt+d5sTaxa1VGG9BIRiAKhJTrQfkbbOk7J6/0JZ3vKpvL32LzJt9ZPy0rKh8uLSW+SFpUPl+aVDcry4bJi8uvIP8saqh2TGmr/I1ztmSfO5//ZUx3Fp7zwtXWfZKKcJ21lGPvrGuUY7S7vIVkgDDz/0cNVtRpIUdGhkV7qsOMnsSw5KoJYOoJLw8j169KgMuHYADewiMufzOXVTreLSNse2xZmP3rC1kFGcjalGjhhZE2NaLeL04OPS/pda/2xtzsg6CQjFAFAXnOlql+Pth6W17YCsP7hEmvbNl7lbJ8v8bTNl6a6PZdGuD6Vh75eZ5nTK4dN7pUsQhtOMZv/aDJx89I1Lgnrz
alWriXdnvetFBmVSAgiN7MqjmzHNRqsnqwm1C6h1P/Z6Rj2Da8XLVw+L9NCIBnb0IyjG3Xfd7Wwus21x5qs3rE8exZUIxbWSTZ9txqj3wUUyhzYvZJ0EhGIAqN/yy64Oae9sy2QL6//WjOGzZA3XDOonbDNw8tE3LklRQQXUWt9Qqi/zjh07vHnGGzdulJ+c9xNE4YQb2ZUSi1X4qvV7+tOf/LQmPFihPJoprh7fteLlqxlwNLCLVzHx62t+XbMHXmNfGuvUKsS2xZmv3rBJHCyZohm0Bw8erOgepTkBIisS6/f45ptvUtf7ARCKAQAAqoaLZj0++sYlbeVRq57FKoJ/8vEn3vmw+rQ58w0V0FVIT/J5HDp0SIYNHVazoor6UGY3oFDbqBilolStePna9omtxQZ29dSPIKkmtLabzfrqDesqc7VaQnFas+l/f8/v89ZoF/Oe694PgFAMAABQNVw0t/HRN64a97WWso++9w/fy5TY+9rdWa0VEIWLo5Ycas2R9DM5deqUPPP0MzXl7anv9MqVK9kY1hG2S+ZtlobHQcv1tWyfBnaIxTdcf0NilUG2vXt99YZ1kXxRTaFYSVuz2qeefEo0/nBtCZJE7wdAKAYAAKgKtrM8fPWNq9aGQcXVtAtlajOxdetWb59pUr6baUWb/Gljmmo8G808Vx/rtNuxqM3E3C/mepdJD+m0Z6qml69tC4Bqit7Vorm5OdVisR78vvXmW4mJ+i6azSZppxSVN/76Rk0JxWmpEtJxPWniJNEs6CQqKZLq/QAIxQAAAIljO8uDUqy+Qpk2u/ClFDFqtlFDQ4P3z1LHW9obrrhE3/FqPyM9aNDxlEaB+L1336v5LElIdp2sppevq4zHWmtgZ1I1lMZ1pxoHv7abzVbDTikKu3btkn4X9qv6s7bdL8Rnsfj8fz1fli9fLuUs4dQap14OKwChGAAAwKssD0qxSvtcPvTgQ95nF+v16UZSy+vTkj3pspS6Fvx0tWrAh+ekYuv0adNTkV18wfkXyKx3ZvUpX4X6xHYjpGp6+bpY99WSxVdbItei+8MPPZyKteDKK66UhQsXVmVdt91stlp2SlGYOWNm1eO93936O+trmG9jXu+xxtYqYpe77gMHDshll15WN4cVgFAMAADgVSYmpVjh/oYqxPq2idSu1tqgy2eLiVK4bM6UdtR/XDPffHpeWgo7csTITJmoj4ckKgoWK12F+sV2g6pqe/na9HTX92bO53Pq9n3xvWpID720AW01x5vtZrPVtFMyxYcmcC6E4ux300NfjRurffgRpWfAiy+8aO13a1WGiuasj4BQDAAANce6devkR//0I0qxqiQYDx40uKoZJ/q71Q5AN/lpzpz8y7N/QRRO4WZGBePRo0ZXfbOpQsprr74m+/btY+6CkmNV/T5rxctXhU0a2NX2AZiKaPPmzvPi0Mt2s1kf7JRM0CZw2qeiGl69v7/n96JxpmtLqWokPuiarXNY1LGtVQ+2mkyrbz1rIyAUAwBATWJzs0gpVjxUnHp57MuJNcZRUe7mm27OZBi1tram/lmp/5768CEKp3czo4cUsz+dLddcfU0iByf6O7QEdcL4CbJjxw7mKzAao5qdVytevrbselT0di1GpY2WlhZ55ulnqiIY+1gZZLvZrE92SqbZtzOmz3BuuZQ9+E86ttOMem30quKt6/GtIu/XX39d0eHHnj17rHi0vz3zbeY9QCgGAIDa5Pkxz1sN4tLgG+czGsCqr50KubayLM/78Xlyx+13ZMoEdfNYayX1u3fvln4/74coXCObGc3Amv/V/ExpsY5dW+LJdQOvk7EvjRVtaIPvMMRBBbha8fK1NW9qKTdjozgq1ul6rpm9Lud4PWTWsdnU1OTl+m7bG9ZHOyXTbNZn//ystdhOheGLfnGRjBg+IrOuVbs3iI49tW3SDGObB746vnWesXmoqxUQKt7HrahUj3m958xzgFAMAAAAiXP8+PFMtpZmfquor353D9z/gPS/pH8O9Zh+7NHHMn83auSoTDbJsmXLMmIETQUh7egY3rlzZyaL6I2/vpEZ5yqKaPZx8D3QwxD9O0UPRfTf63+n7xD3EQCqifaDUIsnPQD72T//rKJDr6t+dVVGHFRRjoP5dK5pS5cuzaxjamcTlnmuoqtm6w66eVAmDlQ7EV3bfD701/Gusaiuy1EzqfXf63fVQxZNnnAtbq9Zs0ZeeP6FTExR7Fr1fb3+t9dn3jn9Tlu2bOGwGRCKAQAAAAAAAMBeub6KaSo6aQVFFj0UViEw+P/pYbH6H3PwW9visT7jIDo+dJzUStJD9sA3O64/m/1Z7mc99Ni7dy8CLCAUAwAAAAAAAAAAAABCMQAAAAAAAAAAAAAgFAMAAAAAAAAAAAAAQjEAAAAAAAAAAAAAIBQDQO3TdbZDznSdltMdJ+Tgqe2y69ga2X9yq+w5vkG2H22UY+375cSZw8K9AgAAAAAAAABAKAaAGuNY+wHZ2dos3+75u3y6cZR8sP5pmdR4h7y2cpBMPPfnX1fdcu5/3yWz1j4m76x55Ny/+1g2HV4mXWc7pfNsB8IxAAAAAAAAAABCMQCkVyDeL2sPfCWfbXpeXvv25nMMkpeXXScvL7+u58/f9mHc8hvO/Xm9TG26Vz5c/2fZeuRbOXx6N2IxAAAAAAAAAABCMQCkCc0EXn9wgXyx5WUZt/w6mbBCxd+B3QJxIct6ReOxy4Ki8fUy9hx/XTVMPt/8sqw9MB+xGAAAAAAAAAAAoRgA0kBr+z5Z3vKuTGy4VcYvv07GrbguIxb35be92cXLe7OLs2Kx/tn98/XnuEGmNt8rX++YhFgMAAAAAAAAAIBQDAA+03JsnXyzY5KMWz4wj5czf2YF4u6f+2YX51tQZIXisbmfr5dXVg6SD9b/mYZ3AAAAAAAAAAAIxQDgI7uPr5Gvtr0q41f0CsTBnwuF46BIPC4nEBcRjJfni8XK5Mbfy4FT2xGLAQAAAAAAAAAQigHAFw6e3CYLd7wpr337HxlxePzyAT1/dpMTiVdcl5ddnCcYLytuQdHtVdwtFmd9i8evuFE+/O5ZOXRqB2IxAAAAAAAAAABCMQBUG7WBaNr3qUxqGJYRiMepSJwjKBIHs4qvy9lPjCtsbNeTWTx22XVlbSjearjz3O/9Qto6TyIWAwAAAAAAAAAgFANAteg82yG7jq2Wd9beL+NXDOgmIBaPy1FoP5EvFI8r6lVcTijuFounNv1RjpxuQSgGAAAAAAAAAEAoBoBq0dHVJnM2Py/jl1+bEYknZMXiFT3WEwEbilCv4mXhDe4y1hPLsrYUakHx7/LZ5rFyrP0gYjEAAAAAAAAAAEIxAFSDNfu/kDcbBuWE4u6M4mt7xOEBvUJxmcZ2fbyK+9hQ/LaPDUV3k7tuv+JpzffJ7uPrEYoBAAAAAAAAABCKobpZpafk7NmujFDX3nlMus52INrVAa1te2Xelpfk1ZXXZcThCQHriZxQnLOh6BWGxxfxKi6ktGActKG4vkc0vkFmb3oBr2IAAAAAAAAAAIRiSBoVhg+d2iC7ji2
Whj1vyJKdo+Sb7X+Wb1tekzX735ZNh2bLsbad0tbRinhXoxw6tUOmNt0mE1Z0i8RZerOJ80Xj4l7FWbG424aiZJO7gP1EsSZ3M9c8dG4srmWsAQAAAAAAAAAgFENS7DvRKOsOzJK/rb9R3l3za5nZ/EuZ1vQLmdr4C5ne1P/cz5fIe2sGnvu7AdK8b7rsOf4tAl4NsmrPhxnbCRWKsxnFE3qsJ/K9inttKHJZxctL2VDk+xaPK+tXHPQqvlGWt3zIOAMAAAAAAAAAQCgG51nE0iUbD/1d5my6R2Y0XSLTGn9xjn49f3YztYdpjRed+/OizJ9fbnlY1u6fhSVFDdHeeVIW7ZrcnU2cEYavzQnG41f0sDyYUTyw4OdigvF1ednFfZvc/bb752W/7eNXrGLx19snSWfXGcYYAAAAAAAAAABCMbjiTNcJ2XjoY5nR1F+mZwTgfgUEROKmi/oIxh9/N0Qa9ryJFUWN0Nq2T95f90hOIA4yfkXQr7hXMB4XEI6zNhTjDTyLuzOLs4LwdUWtJ15edoN8sO4ZOd1xjPEFAAAAAAAAAIBQDK7YfWypTG/ql0GF4e6ff1EgFPfrk1ncTbdY/OmG38n6Ax9IW+dRxLyUo4Lsu2vuLyoUT1iRbz3Rp8ldgVjczYA864lije3G9QjFeTYUy7KC8fUypfkPcrRtL2MLAAAAAAAAAAChGNxkj+6UD9Ze1y0ON/YKxTmRuOkXfWwophblIvls492y/0QzNhQpZ1drk0xt+p1MWPGbHvKtJ3I/Z/53uSZ3pX2Ku0XigAVFjw3FuCJexcqstY/JifbDjCsAAAAAAAAAAIRisE1H12lZtOM5mbX6cpneeKFMy1AoFAezi38REI57rSeCmcVzN98vbZ1YUKRaKD7WLK+suDaDCsWvlMgszgrFeTYUOaF4YF9WXJdnQxHMLn55WYFoHLShWPZbmdZ8nxxvP8C4AgAAAAAAAABAKAbb7DvRIF9svjtnOzG96cI8C4piXsVZG4qpfZrcdQvFH677d9l2dL5oczzucTrZfWyNTG++I5BR/JvyNhR9ROKsZ/HAvJ/HFaWvDUVvY7teG4rJTXfL/pNbGVMAAAAAAAAAAAjFYJOurjOy/sC7PeLwhQGh+MKAcFzahmJqHxuKi3r+v4tl2a6XsJ9IMXtOrJd31vyhQCguJxgHbCfybCiy/sRZoXhAGRuKEoJxDx9996ycOHOIMQUAAAAAAAAAgFAMNmnrOCKLdvxZZjT9ImM7kUdGHM4XjDMicZaADUWeT3FD959fbX1EjrXvRtRLKSfPHJb31j3QYzvxm5wFRU4oXv6bIhnF1+Y3tssTj3vtJ8YtL+9b3NeGolcoPtN1mjEFAAAAAAAAAIBQDLaZv/UhmdF0YYbpjT/vk1E8LSca9/oVF9pQFGtw9/fvBsmR01sQ9VLK6Y5jMmfzaHlt5cAeobib/Czick3uCpraLe/NLu72Ki5mP5GfXRxk/PIbZOmud6St8yRjCgAAAAAAAAAAoRhs0t55TL7c8sdukbgPhdYTRZrclRCL9c931/xGDp5aj6iXUtQ2ZN2BufLXb28IiMTB7OLi9hO9QvG1fYXiHguKojYUuSZ31/VBheJXVt4ozfu+YDwBAAAAAAAAACAUg/Ws0c4j8vnG38lMtZ7ICsSNFxYIx/1K+xXnvIr79ckq/vv6QbLveCPCXorZ0bpKJjcOyROKw/2KB+TRx4YiJxpnM4kHFLeeWD4wL6P43bWPybG2A4wnAAAAAAAAAACEYrBN59kzsmL3S/J288V9rScaA2KxZhI39hWKp+e8ivsKxbM33C7tnccR9lJuP7Fg2yvyysrCjOK+QvH45X1tKMbnvIuLCMU91hPFbSh6hWMViV9deZN8sWWcnDxzhPEEAAAAAAAAAIBQDC7YcPCDXo/ipp/nZxM39v45LUcZv+KexnbTGy+WL7c8JCfP7EfYSznN+z6RN779t5JCcWkbioAFRaENxYqBPc3tSjS5K7ChePXbG2VHaxNjCQAAAAAAAAAAoRhcceDEapm9YbDMaPp5TijOp19fG4qeDONpZWwo1u1/V85KF+JeyjnTdVoWbJsgr6z4tUw4h2YX50Tildf2CMW/KelZrCJxrwVFb3bxuOXFbCgKs4u7heIlu2ZK19kzjCUAAAAAAAAAAIRicMmSncO7heLGn2f+zP5c1IYiIBTn+RUHbCjmbLpH9p2o/QzQzrNt0nW2U9o6WuXwqS1ytG2HaBO44+375Oy5/78WvqN+j93Hm+XjDU9nxOLubOJfl/AtLmJDsSIgFBcwrqDBXTH7ib9veE5ajq9DJAYAAAAAAAAAQCgGl6iwueXwp/Lx+v/oEYm7LShyNF7YS8CGIuhXHBSM3197rSzb9aKc6TpRs+LekdPbZfexVbKiZaLM3zZCPt3woMxovkE+Wn+XfLLhfpmz6U/SsHembD3yjbR1HquJ+7D+4Fcyrel38uqKa3MicXG/4r5exROCfsXBJncr+tpQBEXiGavvlaZ9n8mZztMIxQAAAAAAAAAACMXg3F6g84Qs3vHnXDZxof1EfmO73v9d6Fk8s/lS+XTDMGlt216Twt6hU5tl/YFP5cP1t2WE4cmN18ibq36Z4a1VV5z78/IM+rPy4fo75YvNT8v+E+ulo6st9fdkxe635fVvr8tlFmeb3L2S51X8mz4+xdnM4pxXcZ8mdwNzFhRZsXhS420yf9vrcrz9ACIxAAAAAAAAAABCMSRmMSBdsnDbY3nWEyWb3AXIZBc39pO3my+TD9ZeJ3uOr6hJYW/rka9l/ta/yKSGK2ViRgz+ZYY3v80KxZf3/Hx5L99eLhMbrjr33/xa1h34VI6175F0j5FOWdHydq9QXGBDUd6vuMCGIigU59lPDJC3GobK/G2vyYGTWxGJAQAAAAAAAAAQiiFpC4rj7btl2a7RGaF4ZlO/PBuKGX38ivvlUJF47uZ7ZfvRBTUrEr+7dpBMabw6JxDnc3m3UJwTjrtF4iBvnGPJrtdEbSvOnk1vkz/N8F3ZMisvm3hCSb/i3xQRivM9iyesCDS1WzFQ/rrqZlm6e6bsP7kZkRgAAAAAAAAAAKEYqiUWq4i58dBH8smGm+T9tVfnZRcXNrZTgXhW8+XSuPct2XN8ZU0Key3HVsnEhl/KpIYrZGJGFL6srFjcKxpnuSInHKtYvGjHhNTfpxPtB2XToW9kYuMg+eu31+fZUEwotKFY/puSNhS92cUD5I1VN2dE4vUH5svRtj3C+wgAAAAAAAAAgFAMHrDvRKNsOPiB/Of2J2XOptvk3TVXyt/WXy8frh0gf1t3vczecKs07HlNWo4tlbbOozUp7LW27ZR3Vv+7TFx1WUYs7hWKi4vFmk2cl11cYEORFY5X7J6U+vulViVHT++WuVvGyKw19/ZpbNfHhmJ53yZ3yqsrbzh3b4fKf+6cLLuOrUYgBgAAAAAAAABAKAbf6Og6nRHujp7eIgdPrpN9J1bJrtZFcvj0Jjne3iKnO45I7X73U/Ll1qdlauPV3UJxj1icFYonFhOKCym0ochkF18hf1
v/B9l1bGWqLSiynDk3RjYcWiCLd06WtxpulilNt+bE4ldXDghkF+fzVsNgeW/dQ/LVtgmy+9gamtYBAAAAAAAAACAUQ+qySWtA4Axj06F58tH638nEVZf20C0WvxXILp5YRCh+q1hju8DPb/SIxst2vymdXWdq5j52dLXLgZNbZMuRpRnReM6WMTK9+U75cP2j8s6ae2XG6rvlvXUPy7wtY2Xhjjdkzf45cuLMYTnd0YpADAAAAAAAAACAUAzgH+2dx2XZrldlUsNlBSJxvmBc1oYiKxgX0iMUv7/ud7L7WEPNiaRdZzsz3+nEmUNyrG2/tLbtzQjIR0/vkcOnd8nJM0dEbSvUD5uxBgAAAAAAAACAUAzgLQdPbZS3m6/rFokblMt67ScC5AvFlxUVioNicf7PV8l3Bz+Xsz3CKgAAAAAAAAAAAEIxgCeclU7ZfHievLPm33ptJxou7c0u1j+DwnHDL4v7FX+b/3NhU7u3Vl0pC7aNlpNnDiEUAwAAAAAAAAAAQjGAb6zaM0mmNl4pkxouDXgUX1rShqKYUPxWLqO4iA1Fj1j8yYYHpK3zOEIxAAAAAAAAAAAgFAP4REdXm6zaMzkjEgeZ2HBp0cZ2bxX1K76siGB8eR/P4o+/u0+Ot+9DKAYAAAAAAAAAAIRiAJ/QJmvLd78qkxsul4mr+meE4UlZ64kgOeuJXpF4YlAg/vayAhuKrFDcm1382cZH5Fh7C0IxAAAAAAAAAAAgFAP4JhQ37p0h05qu6rGe6J9vQZETiUs1uStlQ5H1Ks5aUVwhf//uXmnvPIFQDAAAAAAAAAAACMUAfgnFnbL96H/K9KZr8q0neoTivr7FvTYU3Y3tstnFJcTinoziiQ2/yjSzO93RilAMAAAAAAAAAAAIxQC+sed4o8xovlYmNfTv41Wcs6EozC4u8C2e2MenON+zWK0nGvbORCQGAAAAAAAAAACEYgAfaW3bJfO2PCGTM6JwQCxeVZhd3NeGIl8ovqxIY7tuZq0ZIjtal8nZs12IxQAAAAAAAAAAgFAM4Btnz3ZKw54pvULxqizlbCgKm9z9ssd+oq8NxcSGK+SDdbfJsfa9iMQAAAAAAAAAAIBQDOAr+06skS82PdQjFvfNLC4qFGeyh/vaUGTE4QKxeNPhLxGJAQAAAAAAAAAAoRjAd1bvmyXTm67uEYn7F/UszgnFBX7Fb/Wxnuhtcrdox1g5enoHQjEAAAAAAAAAACAUA6SBFS2v99pPBG0oAkKxehVn6bWe6PUqVhuKrFA8Z9Ojsv3oYkRiAAAAAAAAAABAKAZIC6c7WmVly5sZkThjQ1EgEhf1Ky4QjLM/z974gHx36DPp7GpHKAYAAAAAAAAAAIRigDRxrG1XfnO7POuJ/kX8ivN9i7Mi8c7W5dLWeQyRGAAAAAAAAAAAEIoB0khr2y5Zf+Dv8s6a38rkjM3EpcX9ihuyWcTd//vDdbfK/K1/kZNnDsqZrlOIxAAAAAAAAAAAgFAMkGZOdxyVI6e3y/ytf5bZG++VKQ2Xy9TGX+WE4qlNV8mUxiszXsUff3e3LNw+WnYcXSxqX8H9AwAAAAAAAAAAhGKAGqLzbIccOLleth1ZKKv3vSPLdr8iS3dNkCW7xsua/e/LliPzpbVtZ0Yc7jr3b7lnAAAAAAAAAACAUAxQw2TtJDrPtktH12lEYQAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKAYAAAAAAAAAAAAAhGIAAAAAAAAAAAAAQCgGAAAAAAAAAAAAAIRiAAAAAAAAAAAAAEAoBgAAAAAAAAAAAACEYgAAAAAAAAAAAABAKIakOHr0qBw8eLAkp06dEu6TOR0dHXL48OGS91P/Tv8N9woAAIC4xAZdXV1lYw+lvb2d+wbecvz48bLjV//e53mJ9wsAmMf9nscRigEM0c3W7279nfzX/+u/lmTa1Gm80BFYv369/PhHPy55P/Xv9N9wr6AQ3WToIrplyxaZ/9V8+eabb2Tv3r2Zzb+KANwjACAuIS4phq4dl/a/tOx90zWFewW+xiFPP/V02fGrf+/zvMT7BQD1js/zOEJxgmigXm4gxOEH3/+BXP/b6+WNv74h+/btYyCxIYt0rUmj11OY2YRQDFE2ZEuXLpXHH3tczvvxeWXH2n/7v/+bXHbpZTJh/ATZtm0bwjEkzry58zLjsNw4HTN6jLNx2dLSIhdfdHHZ3//v//bvollfPC/iEoRihCxIVxyCUAw+Y0v30PfogvMvyOgdY18aK4sXL5YTJ04wdgChGBCKo0ykgwcNlq1btzKg2JAhFENNbehHjxot//P//Z+xx54KAircYWMCSaEZZb++5tdlx6X+vf47F7//izlfhL4XesjMsyIuQShGyIL0xSEIxaDofQwbdzp+a0n3UM3jmquvkdmfzq4LayVfnzFxVuVxFkIxQnEiQnGW7/3D92Tcy+Okra2NgYVQjFAMqUUzBp4f83xmTrM1Bq+84kpZuXIl4wkSQYXYsM2Oi82yZr3d+4d7y/7uf/npv2TKpXlOxCUIxQhZkL44BKEY6lUoDvKjf/qRzJg+o6Z1D4RihGJAKLbK3XfdLceOHWNwIRQjFEPqWL58uZz/r+c7y0TQhZfSNXCNCrEqyJYbj088/oR0dnZK0r/3kYcfsf57gbgEoRiIQ5KJQxCKAaG4PhJBEIoRigGh2Dp33H4HYghCMUIxpAYty
Zw0cZLV7J1yZf/qG8h9B1dohss9d99Tdhyqj7D6Cdv8ve+/936oSKEl0Dwj4hKEYoQsSGccglAMCMV9q6rVE7zWbOYQihGKAaHYCcOfG44vJ0IxQjGkYnM2csTI0AZgNtFsIcYXuMSkqZ36CSc597v0RgbiEoRiIA5xH4cgFANCcXEefeTRmkqUQyhGKAaEYmflTXM+n8MgQyhGKAavN2d6qFWNcYlYDC4xaWqnfsLqK2zj94XNqTSxIy4BhGJIfxyCUAwIxfWRKIdQjFAMdS4Uq5/g3//2d5n/1fxQPnj/A3ng/gfkB9//gdFkedWvrpJ9+/Yx0OpoQ6bek6tWrTIaT4UMGzqs7HfUTrN6+BD1c/V68MSEYkyfNj1SBo92Hr/9ttszc6GOrY0bN8qyZcsy5fQvPP+CXPSLiyIFlFdfdbXs3buXsQlO0GZISTWWC2ug98Mf/FBWr17NWCcuQShGKIYUxyEIDJBmoThM99D3aPy48fLsn5+VgQMGRraC0XdZ32meMRBnQeqF4jgvuZZVjBo5yiiwYQAzUdgKPotlBgNUEgDphssk8PvpT34qn3z8idH427p1q9x/3/3GGz8tVcOmB1ygwqwKtOXGn/oKV/p7NCYYdPOgsr9HPZNruTs4cQnxH0Ix1EMcglAMaRaKo16Xvhf6XW+4/oZIhzm1MI8jFBNnAUJxrJe8q6tLpk6ZGhqE3PgfN8qxY8cYxEwUCMXgDfv37w8ty6+0Q7hp53L9HR99+BHjGqxj0tROBd5KPfUaGxvl+//7+zSxIy5hA4NQDDUehyAUQz0JxXHeJ0WzkdPekwGhmDgLEIpjv+QmA/h//D//Q1asWMEgZqJAKAZvePGFF402TnoYpodicX/Prl27MmWdYb/risuvkD179jC2wTrasM61JUSYxcXFF10sLS0tjG/iEoRihGJIeRyCUAz1KhRnD3g0Cc5ELE77GohQTJwFCMUVveRLly7NiMHlfsfkSZMrHsQavMycMVPuuP0OueD8C/pkMmuZh3oijxg+QvSabDXoKUQ/V4XvsS+Nlet/e7387J9/VjSw02vUa9VrrlQAQii2LxRrlvvChQtLelTp35XLhD9w4ECs/1bHj45P/a767hV6X+l4evyxx6W5uTnSxmDHjh3y6iuvynUDr+tTxhj3M6OUZa1Zsyb3Tpz34/P6PDv9/zRzUX1Mt23b5uQ6TDFpumXTY0y9/0w2aa+/9nro71u7dm3Jcaf3tdT8oX+vmaWF81V23lRfw82bNzt/LkmPlTDPdL2f5eb6cu+qevXrPdVA2mfrEBVoVagtN/Z07oj7+SZN88aMHmPl/ug8p++ljo9ia68+E/Wy1/Gs4yyp5xKMT4pdl75nl//y8sxY0vFi+8BTMw31czX+0e9f7L3S8dvvwn6ZPhNavl5pplMlcYn+7lnvzMo8x8Jr1Weo/79eY2tra+xrLDdXhr37Lj83SaFYx+XfPvpb5pnrHFDM4iD7ztiKncNiK42dSlUpaoyisYqOUx2v+i5t2rRJvvvuu7L3fPHixRXb2uhaU+rzFyxYIIcOHRLikMqEYp2ntM9IsXlSx6bGr/p9K+ltY9InpdQYTOOetFjMor9Tf3ex913vu86vep91Pa1k7pswfkJsL2CXvWNc6x5RxGJbvZp0zdQ1Uefy7PxYuL7r93ryT09m7m/cKjGfnnFwPOsaVaw/lo5nW1qLD/Fe4fqp86V+93LPIPvMo851rmKkasbsad/z1aRQrIPwsksvK/s7Hnn4kViThT4ILVe98oorI3cd1Rd19KjRVk66sv5Ed95xZ2RT+yz6HT6b/VmsYBah2L5QHBak69+V6wRd7pS12H+ri/a4l8cZN4JUbrrxptBAbuXKlUali1mGDB5ibTHVTbwGET/6px9Ffh80sNagpxqepSpahV3fXXfeZVXMMfEhNMm6LPeOFM4BOuY009PU/zC7Hnz15VfWBeNqjZWwubPYhlY3ANrEJMp90/ldgxNf59awMT/g2gFy9OjRWNcfdlhcacayPnd9/joOoo4dHW867ioRHMtdl67pceITjSMeevAh2b59u1Qi1OomQn0TozTCCopQusYsWrQo1vseJy7Rd0u9UE3jKP13OofF2fS6ynqs9HNdC8UqSrz26mux3pds7KzfIe7YDIutin039dPVsVgqlgrzWq+0clE3iPf+4d5EKyJqNQ4p9g5EjUV0btL5UecLF/NS3PfLlz1pIfqu6j2PErPEia/CnnsUXFaIJiEUZ4U2FUrDvqse1sX5fF2XdY+nc2PUNT5ujOHDM9b5J2oMnkX3wyq02t7DJBXvmRwgRqHcXOcqRqpmzJ72PV9NCsWnT5/OnKrYnix0cozzQhZ7QVWgiyMyVDLYyw2udevWCUJx/QjF//mf/5lpQhJnvKgXVrHr0FNH3XDHEQj6X9JfNmzYUJFwoyeEUUTvcs1Z9P74lF0Z9uzjnnL+5dm/hN6PsMZipkKxzp+mPmouDxSqPVaiBA16rW+9+Vbsw0D9jpp55uPcaiK0qODrQvCoxAP5y3lfxp47C5/NjOkzrGUB2IxPogqh+m8nTZwU69DF5vseNS7RTVbceUDnMvWGRCguLxbpZjTu/GUrdo4iFOv7qGO51DVn12KTZpmVVC3s3r1b+v28X8nPfubpZ4Q4JN47UEksovOF7sGiiD6uhGIf9qTF1gJdP2y88/rd9DsiFNv9XYoeQkXNKC91eBbnQFj3iqa9oqr5jG2P56haiw/xXtqF4mrH7Gnf89WkUGxb4NMHp2XJcQSwsFOmUqXZLieGUoNLU+ARimtbKNbgVkWyShe9woYIWkKo/18ln6kBSJwTOxUTTL25ogQyuhFLwn9aN0Bh12OrTL6QLVu2hGYfhIlqJkKxLtRxTuKLlZRVkqXlw1gxDRq0rDjswNP0nlVyCFNNn7M4496koihMdCh1vfqcbccB+owrKSHX69KssGrEJ7qezP1irpUgvJQYGyXzO0pcomWala6DOqdFGUv1IhTb3FwX4+677o7UkNpUKNYN4MgRI8u+S8E4LGzt1ncorp1KOR/3Sg7R6jEOCb4Duomu9JBYx8fLY182FgxsC8W+7EkL0Ww2TfqweU06h+jBTal7jVAcz3rr//zs/xg/a13n1ZbJRgxfuL6HHQRU8xmrqJv0ePYx3kurUOxLzJ72PV9NCsVhJVtRJgsNTH5/z++dBLvZB6oeaDYm/krRRWDB/AWCUFy7QrGKxLYmTfVrtSUSxx0z+p0qyVIN4/bbbo+0IXUxV9lo7FVJ9uX3//f3pbGxMbZQrHOKzQAz7ompL2PFJGjQgGHY0GHWrk2zN3z0LA4TJ1TwjerdqGW45ea4OCXb+lz1+boaO9qwKc4GXa9LRTNX11WqeiSKwGMjRjL1pTONS2zOSbrxU9EZoTia6Ghjs2aa9W4iFJuIxIVxWFgWbiWCrm5wXQjQ9RiHZN8B07J8U7FYx4vJumpTKPZlT1qIJhrZqNKKeq8RioujPR7Cvq9JHG06L1aiO2giiW9CsY0DpXJoFUXUDP5qxXtpFIp9itnTvuer
SaFYB0hY1pgGmWpREbYg21D3bW2ETINvNefWU7Asxcy6S6GG9NqNGKG49oRi3cyWK/WOioo4mqGpE5LNU03TDZDNoN/WhtR2eanr4NW0+We5d7ncO6KbXZ1Tqike+TZWwuZObZw0/LnhVq/L9SbfVbmzbk5U+I1SxvzE40+UvRdRS7aTigO0qZMeuqXtukx8TbPNdINxSbHmdpVWm5jEJWqFYHsu0M2uiQ1FvQjFpokNet/0/c+OiVLN7SppcmYqFKtYEbYOFovDwsZ/HIuIsKoI25m9tR6HKENvGWo92UbntY8+/Cgxodi3PWlOVLOcDBCliSJCcXH0UEUPV8r9Tq36CBOJbceicZLUkn7GSYxnRe+tqZhXzXgvbUKxbzF72vd8NSkUh3kfZk9zwjaceopm2nxFjcqDk4+e0G/cuNHYzN9kwGlDEO1WWqrRh54Cl/Ic0hdHfbVMSkT13oQ1+kMoTpdQrOP0H//7P1oP3B584EGrJ82mGThq9G66EdVnqF3Kgwvy8ePHM++saUOGKAt6FLQzetjvnzxpstP3qNLmnzaDOBdWJb6NlbC5U7MYTASLuBUAaWtqp8KvaePZMMEjaoafaRyQ9KFUUhs4k+sqtW6pX/GLL7wo6mtY6n1QQVH92Ewyd0ziCZO4xBWFdkz17lFcKs5X+zRtpFQqxtcy582bN8v9990fKbu3kthKD9FNDzQLf2eYwBmnKqLcZ1baJI84xH4Wns5xroViH/ekSR7ClxIU1YIjSmKUVoDoux78b7KoYBOWQJYWoVjXIs16rMSnWOdFk5hX5/TZn87Oaz6sa/7OnTszFi0mvQvKHU4k+Yy1ZN9kPOt30u+mMWfQr1zvu94LE5vQUocfvsV7uh5fN/C6SAe6mghQ7P4r5davSmMZH2P2tO/5alIoNslwUf+vSjvxmjYyUqHC5HRDB3fYhlgffHBw6SYrSrajZgtrABA2YWtJKUJx7QjF5RZUzQjWsjENCtVjTE+ZKym50c/U0ri4n6klUzYWAtNmSJoJFmZJoHNBJV3fS/H2zLdDAwndxLkc4yZlpwOuHZAXBFayQSt2sKV/6pyjwlKUhlhTp0xN3ViJK2bpe3XnHXdmuhxrAK5BtXqdmfrDxmlekgRhQosKvxqMm3yWClA2m9iZxAHBuKVQAIt6yKD/xsTGIErGS6nr0vs+eNDgiq+rMItbx6Npx/osOheEbc5Mqk3ivFv63fQ+6P3Q+5IVK3XMTZk8JZL/clhgXk9CcWG1gL4DUbIU9Rno/B42Pk2ya8NiK/XrNLE80k2ubph14xxMwijX1C5qVYSi8VKpz9NqSdt2WPUYh2TnFH02wXlF3wWdL8PehyiHmTaEYh/3pFEy9zR+eerJp/LivqyYqPOmadynz0yvP+4+yLYg66tQbPI+lZtLTARTnRPfnfVuaGNHHSf6fobN5XfdeZfRftrVM9bfrddgoxGf3hONg8L2vSZ+sj7Fe671n0pjGR9j9rTv+WpOKNYbGZYiH2birllqYZ09o5bm6EDRRTIsVbycB1c2+NZgddTIUbHL4U3ukb48CMW1LRRrgFcq4DLZuJcS3HRCK/aZ+s6FnXCHZY1kS6rCKgai2kWYXFvcZnvlCOv2/ZPzfpLJAnE9zsP8zMqJdVE2aJolVi7IzwaVOr+ZLNJhVjk+jpWoQYPeBy2XL3XfTL249Jpt+lraIkxoMVmPTDdGUdYkkzggG8zpgW2YMGpyyGAypvUZmnjCa7CsDWjCNnFh16Wfo754GjyXmpe1ikvXCxX2ovruBTtTlzswMMmkjPpu6fcOs4ww3eSa+F/Xk1CcTWzQeNWkYVGpTKpyXr1h4mElpbPZA2/dYIfFeGG2cFGqIvS76HeKe5BOHGLW50BFnHJzY5Tu82FlvpUKxb7uSdV2w2ReVEEy7CDeZJ696BcXyYTxE8pm6CMUm78TpX6nzrthloJRehplP1PXg3LPV/9uzudzqiYU6+8OG89Rq0s1ZgnbS5fzk/Ux3vNVKPY1Zk/7nq+mhGLTDpVhQVtYM5yoE2SUF94koAwLiittlGEi1iEUp1coNm3CYerZl0WDzrB7oRNv2GmfesmdPHnSKHvNhten6bXFyQxyFcglPSeXG3smG7QojV+Cm2+Tk+FSWXy+jpUoQYNp8KdBSljpdFKbfRdN7e65+57QoC6ssZpJpUyUOMC0EUucw7Jymakmm/OoDR8Ly4f15xHDR8iaNWuM3ll91yrNcjR5L8JEsijvVpRmJKbZrYqOZYTi3ucR9+DANC4Ka3IWVSjWTaSuVVHW3TDLmygNNMt5i7rymq+XOCQ7t0Wx7tD53SQOKZfZXqlQ7OOe1FTA0sMW04P4whJ7fRdVZFZR31TwQCg2/52lDl7C7DtNLROKHb7c98f7Kq76cvGMTRIW4varCYvZys3rPsZ7vgrFvsbsad/zeSkU66mhfjl90U3QRV9FKpOT37CmIzqR6abUVTdBfZnLiW9h2c62UOuNSrI0EIrTKxTrIYHJ+DVpChl1ATUZN+UW+TAxyPREupTYEZZZY7t835cNms5LLjdocT2eVZwxyVYpJlL5OlZMg4aowXiY5ZKpn2c10DWvXPm3ichrM6vPJA4wsT6Jm+VeymbBZOzE3cRpabn+dyYWLNXKKAybf03frajZd6bZrWGHGvUmFNvAJA4JqzgwFYr138T5niaHkmF2d8Xs5Sq1ziEOsSOomsQh5axxKhGKfd2T6vWGiTJxDuI1K/DNN97MZPLHibURiis/eAmLJSuprgybi00qh1w84zALNJOK70rikmKNBX2N93wUin2N2Wthz+elUOySMJEsTFyI2gzHRdBrA82+LucJFTbRIhSnUygOK5GI4pUXxWMpiDZFibvIh4lBJj6WlXQKtn2QUw8btKhjLmopUamMMl/HimnQEFVcN2lIVC2xx4bQUm49Cbunel+ifHcTYamSzVJYgFfqek2uy2Xn9mrPQ2HfzXRjpVk6ca7PJIuj3BqBUByPJ//0ZEWxpsl7E1dENBUawqr0TMavq5i6XoRi7YHgKg4pJ3BVIhT7uicNOzTT64qS5WgLhOLKhGKNicOaP5ermjGJ9XQujCqaun7GYTGZSVWb7eQ8X+M9H4ViX2P2Wtjz1ZVQbHK6GfYy2/D8CBPfNCiu5PPVjDubbV1qAqlUlLQ9Ueg9VcuBUl0yo6AdUhGK7UzeYU1OTJvJRNkMaJlmMW8ekwBDszwreQZh/oCKzeC3Hko+44ofUcqJdJymZayYzJ1xxHWTQKlc0KCbXBvzb1hX47hCS7lsurBSyaiHAmFN8SrdLJk8q2KbpbDrcmGPYwt9X7JxSalMsUo3gCbvVqVrtnpkxt0wIBT3zdLW91J/v/5Zqtyy0u9n8r69/trrFX2/sNJlk0PmctfpsiKkHuKQq351lezbty/2d5j96ezY8XUlQrGPe1KT2MdV9jtCsTlhSTnFrCfCEiBslLOHxTHl7AddPGOTw5LCPYbtqjlN3NMEvjTEez4Kxb7G7D7v+RCKY5YahpUT2xAGw8S3KF2N9XRE/Zu09P+8H59X8jN
/9s8/y/wb7c6opQa+CcUmGxVTKhWmalUojpMNG/aZcTzzwk7BSn1PDYbDvIIqzbg5ffp0aBdnm81k0tJEptx1uBaKtXHJZZdeFmkj4/NYMZk7ox6+2BB7ojbDtB2chAXr5eaacuXaYf5hcd7LSisLTMaAji0dY1Guq1yzpyRRQUZjDRULSnX91jXggvMvkAfufyDnQZmEUFzpex+2kS73O+pZKFYRWGPwF55/Qa684sqSVnGa2auinnomZsvPXQvFtkTYsL1N2Ea1XBVMpRltxCGVxSFhPtTlfkclQrGPe1J9BvoskoqTEYqTOwAKGws2rjNsDxgWx9h+xibjudL1z2RPUmhP5Gu856NQ7GvM7vOeD6E4gGY4mZTGm2z8k6BURmWQrVu3ZpoEmPgylzq9iiPWIRSnVyiOs8C7+My439NkMU+CSjP+owRlJn5dlaIbcfX+jPucXW/QTK6v8D30eay4CrLSLhSbCL7FNp9hAnPUJnYmcUBYxouNLK7Cd87kuqptO9HQ0JAp7zNp+lZMOC5nh+WLUGyy4Ss159WjUKzi5nvvvpc5FIgzl6hw/L/+v/+VCqE4zCagnNgbts5Vkg1FHFJ5HFLJ/BtXKPZ1T+qzzRVCsfnYKZaUFia4JUGx7FqXz9hkPCeBZoCnId7zTSj2NWb3fc+HUNyz8VCTf9Ps3CidCV1SLmjVUh4drHEFYhvXgFCMUFwNoThK53KX2HzmJgFKMHhwgUnGbqmT0iQ2aCbBa6G/l89jBaE4vtBSzMdNBYxylhVRM/FMno+NMR0mzhRuzpO6rjjodT704EPON1s+CMUmv6OUH229CcUrV67MZA+7nmd9EYrDvNbLHVqVy1iNethFHOJmbgz7HaWqQeMKxb7uScNihWo2zkUo7l2TNYaI2hzWxOs7CSqJVaPeS5uxr611zOd4zzeh2NeYHaHYc6FYT6Sidqg0yRSpplCsAzDMRwehGKG4VoViX059bT5zk3JGlyWnJt6wYc0lktigRQ2yfR4rCMXxhZZijXvKnfLH8XAzuY82xnTUzXZS1xUVrdbSqq0k3idfmuzGjQvqSSjWDFjNBk56g11NodhkPS01/sp50RYTdGxCHOJW7IsrFPu6J63ESxqhOJnrivM++ZLBnrRQbNKLJ+l1zNd4z0eh2NeYHaE4BRnFUTvcmhj0V0uk1UxikwlcN8Z6Kq8vhXZQzPLYo4/JRb+4yFg8QShGKPZNKA7LHEyjUGxSbuk66K6km2tSG7Qw78LCcejzWEEoDp9zyq1TQS+vsA7dUZvYmcYBNsZ0WJOiQj/OpK4rCtocWJsEh40HrYAaOGCgPPvnZ/PiEvUn1v4JaROK1VIGobhMw9r5C4xEYvWv1rg2OCaUct7WvgvFYe9psbERdkBW6F1ZDdsH4pDw5mC2hWJf96RhjaMQiqsvFIe9T8XmFZN5oBaFYpNGaEkLxT7Ge74Kxb7G7AjFDidMFTT1Zmc7ZBdDT4DCRE9drEwa2KXBekJF77CN2MgRI0O7+upC0NzcnAlK8ShGKMZ6orpCcVgDm0qM7k1Qr/Pz//X8sr9bD560LLSaG7SowgzWE+kVisPE3+B4DMuaifPeVGIt4HLjmNR1RfGeve+P94UKgW+9+ZZo092we67P8vbbbq8J64lSPvb1IBSbrClqR/H1119LR0dH6HXpJj5uE7FqCMVhXuvFmnKWs11IqmERcUjl71kpH8x6s57QZp9RK3oRiu1d165du6Tfhf1iNffCeiI91hNJxntpt56oRsyOUFzlCdN0Ab3rzruMRR0dRDqYwgLczz/7XOZ/Nd8ZWjYdLPEK823UzI0v530pSYqStge+igMaZGkJa6WMfWksQnENCsUm5ZHqSe7y3VSiHD7Z8GXNehSaNOSMWuavh0uVbg5db9DCmpUV8y70eaz4GjRoFraN+VeptPFROaElaCfxzNPPRBJkbMUBpbwobR5+FPoxm1xXMQ/napWK65ylh9JJrjdJCMUm3o/qqV6vQnFYJpseBkR9dyr9fkkLxWFrus5vplUUmtSRhBhAHFIeFYB1j+Kq632x98vXPamKwCoGl7suPeBBKK6OUByWWFbOziaseu+nP/mpfPThR07H28KFC8uuEbafscl4Vl3B9X4hKNz7Fu/5LBT7GrMjFHswYerLrZvBsHKlOZ/PMb5RYZNkKbNql4R1gp86ZaokLUomVeJZbRCK/RGKTQTDUplcPqOLnEmn4SiHXqbPNqw82ERsC3tHKvU21N8fNs8XCjM+j5W0Bw1JECZaqHiyc+fOsoJdJeMuLA6IK0KbZk2XyoAIK31OKpPLxEs66uF1WoRiE+9HrXirR6G4paWl7Dupfqt79uyRpL9f0kJx2Puh1hpqJxfmsx7HY504xI1QbCKklzogiisU+7onNTksq1a2Y70LxcuXLzey/SllZxNWXq/9n9atW1fVONL2MzZJLHHdzDOO1U21Mvd9E4p9jtkRij2YME1OzqIEp2H+iLpBqDRbyqZ3WNyyNIRihOK0CcVhGYTVPGGtFBMxVOel6dOmW/lupt6iJtlMYe+IBq0avLr0WiuWveLrWEEoNrM1UKG33Lqnlgbl1motpXblkxynB0IQFX/CPr/YGDDx3nZVHh7FEy5u9kYSQvGE8RNi3x+1StBKhLibt7C5Mk7TMpMy46SE4rDxqZu5agjhSQvFYe94cIyUe5fC7BaIQ8zHyIMPPBhqdVKO1197Pbb4VolQ7Nue1NTLVkV1FdcRipMTik3fp3K9G0xs26qVLe7qGZu8n64bisZZT5OK99IgFPsasyMUezBhqifvVb+6KnRi1NImk9PNsIyIpF/MsA1Z3MUFoRihOI1CcdhkrYuqZnzValaxfnetIOjq6or9HVVYMAkmTctMTTzNbrrxplCf0mLo7w/L4ik1XnwdKwjF9gKzcllWupbH/d3lPEOzqKememtG/Wx9D/R9iJP9YJLVELU3Q1AELVY2XQwtkVSPQ9vrYhJCcdznZpqtVe4AKmyujCoM6jPT5m+V9m2wNXeENQVKevNYTaE47F3Niublyp71sJM4xE4cUsmBtYmHcinP10qFYt/2pFE8rXX8xhHn1eYj7tiqV6FYx54mxZnER+XmYa100IoHl1WCPh4GhFVuV+Pgw5d4Lw1Csa8xO0KxJxOmWkuEbSj1IZqm6IdlsMV9MbMv/p+e+JPMnDHTKMs5bCDEXVzCFnkbQrGW0yEUIxTb/J4mC+ewocPyyjqjsHLlSnn8scczflFxP8NlA4rsJk3HZZzr041S2IYnqqWNafMLFTSibBr0+6nnX9zmgr6OFYRie0Gyy42zSSa7js+o40ffq7DPLbcRe3nsy06uSzPmdG655uprMhmD5eKTsDk87roYds9tCMVx749ptla5sRdWHhn18Eqfk8lhig2h2CSLLCy2jzOvmcwDPgrFYeJD9kCh1Jio1kFmLcch+v7qexzlWk3jkHJzdiVCsW970izbt2+XCy+4MFScXzB/QawKCW2EqvGVvgNRMjnrTShWQV0z2U2bNw8cMLBkNrGpPhDXWipbLTZ61O
jMvKfidpwDARfP2MRaJu7Bh/LZ7M8y82XU8exDvJcGodjnmB2h2IMJ06T7dhRRwKT8Ks5gK5b9kV0M9WEW+7ywBgpxgkltMBM2IYZ535gM/EIfNoRihGIb3zPs5DduaWQxIeCiX1yUKVXW66mkbNEUDapN/MWyTSU0QDS5Ls1KeejBh4wzNKPMb1G6JD/15FNG74uWrT/80MNGn1nOYsDHsYJQbE4p786wTYyNUlyTOCDKmNYNkQacYe9gmC+pyYZGufuuu43sH/S69B343j98r2gWzYjhI2Tz5s15G7owj8o42Te68Qyb+8KstkyF4mw8eOjQIeMMapNDizAfvLCMW9M4tdwzi9JkK8rcYVL+v2DBAquZaPpvTTJcw/xQqyUUl3tXdZ7SQ8ZS2XvlysOJQ+LHIZp1WSrztxCdH0xE4rA5u1Kh2Kc9aVRhRj9Px7mpSKxCZuFn6DyndkaaFBZ2TWF7lrAo5n2VAAAPWElEQVQ9c9wsy6R1DxUbdb6NcphumlVvksUe59Cl2OGmXpPGKvpsTec7F884zFc+7sGHooc4wXUgK9BOmTwlVCz3Id6LM5+VW+vPnDlTcr2oRCj2NWZHKPbkZM3EV0cfpnbrtFV+FWWjoS+MihblBlyxtHjTbo5Hjhwxuo5ly5YZTTrKH37/B9GFO+7AL1Wepp8ZZ4FBKEYoNrWb0bGn75upuKvCh77P5T5TD6Ncl1uZlhIXBuI6R3zy8SeZrBJ9Hvp+6bPTU3sNSKKU8Pe/pH8k3/MoQnG2S7dm45ZamDWQ1X9j8lk6DnQ8pGmsIBTbF2tdHFDq2quWVaZjWsdtqeBas6/CPB2jrCkmvRlMr+v+++43mh9uHXZr7rpMmkXqnGQyX+q16dxlKkzpRqbUhjKKUJwVufR3l3tXNYbROdbk89S/uNw8YuI5mG20WWoM6yYurAyy2AZXRepSmUwmc4d+RmEWmT47HUPZGCCse7yOs9mfzjZ6N/X7h3UZDwr0mrFV6vtVSygO83K94fobSl5XNSvyaj0O0WtVj/tS75h+fxW/dX4w+bwwa61KhWKf9qRRLTmyQq+O51L3SK9Fx43J/dbP+vrrr2Pvg7LzvgqT+lz0Wes41flR10tXXrRhuofOYTrf6XtXDh3nep2mh4RxkyLCrjfqoYvJOm8qZLt6xiY6kl6j6RqWXa/DLEHCvHOrHe/F1X/0UHDHjh2Z6zl+/LhokqKO33/64T+VTOaoRCj2NWZHKPaoBMOk2YCWtJQSP4NoGr6J30/YaXoU4aNUiZWJF5QGnOUWdD0h1LKJqItLqQDaRMAOnlI99uhj8sD9D8h5Pz6vbNMHhGKEYpNNm25YTTbcGsDrQl1uAjcRAiop7Yvj/6QnnnFK7itF39Wo3zOqUBzMwH32z89mgkdFf77g/AsiBb5aLmmSqejTWEEottfUznYTu7h2A8H3Rzd7Op51jdMSyygCialFlh5WRMkkyq7BH7z/Qazr0s1R4dgxySbTTYlea7lN1F133hXZi7rUuhNVKA4KRxqf6EZas99UQLn9ttuNxWtTL1UTH71gd/nsWMrOjzpnVjK/l9pkmQj/wXlbP0czGPW+BavPTGwi9J7qPFpKnNdYWrNzwuayKN+vWkJxXK/1ajQoq8c4RMeivuf6vut7r++/zgOmB0Ol5kbbQrFve9IgGoOZju9sZvD4ceMz73ic+63rRbk9l4mIGNbfoNya5Ur3cE0c67ewhIjs2qnjpNwzMT3cNLV2cPmMTWwGsiJouT4YeiiihyNhmou+02G2Dz7EezbirCCTJ022LhT7GrMjFHskFGuQWqxspRA9mTVpbBel/KpwoxF1ASxXNmTqGZZdhINBj04WYT5Scb2K45QFV9r9GqEYoThOxktQlNQAVQNVfVdMDk5sdvmOEqRpdlrSm7M4m9Owd0SfZxTBxRTTUkvfxgpCsTuhpVxDoSTKsCsh6jxTWM6Y9CZTg2OTbO+sGBMUYYOHxnGIKxSbNvuJ8+xMDq1MylvjxmlqexNXSA3LfA0jePBvkrBRbIOm8eTNN91c0bvmo1Acx2td1xuTMmLiELM45PJfXh67MaqNRuk2hGKf9qSVZp+7PEBYt25d5pAt7u9wdUhTLaFYx71W4cSphowSYxQeuqhIqfqD6X8fxcrC5TM29SQPWkiouKhrWFShMYqVRbXjvUJMq31KUSqru1Kh2MeYHaHYs+6fJtljUbrfanaS6wFnMkGaBt9RBriKtUMGD4ktFFfSld5VeQ9CcX0IxUluYqKexNvMphz70lhnG5zCMk+TzuJx3hH1u7O9kYiaceTTWEEojoaJX14UH9W4Hd5dxwFx5pkkAuJSgoFeq+13Sr/L2zPfDn3ecYXizz/73ChLyuWz09jT5jPLblYWLVoUulaU22SF+bmbNi42TWyIGiOrEJ9GoTjOvdV/71NVR9rjkEkTJ2WyFat1WG1LKPZpTxpXXKtkbTAR1bRBpDaKdJHtmDahWJMbdOxXsn/RdcW0UV5S8bzrZ6wZvKYVNkklBlQ73ovTbyFOVrcNodi3mB2h2DOh2HTzEuYpVditMkppTNTMAZMyF5sLcdY7WA3dw7JIygXQJv6fYR2fEYoRiivZtOm91w2Ai02MfqZ2nK2GSFx4IOMqUNPvqPNlJdlLJgu7zflL74UGr3FKaX0YKwjF0TGxOYjT2DUK6kvnIvCsdJ5xuZELa5AStczPxANXMzDDNoFxhWId/ypEqSBl6x6ZNkZxIbCrEJDNZDbxVyy3yTL5700P/m1uaHV8qfhs4u/sq1AcxWs9rCEicUj0OETXU9viTxQ/YJtCsS970iQP46M0xKv00MtVIlPSQrFai6xatcrKd9CeIiZe1EkeDrl+xqaNLOPGOnEt0qoZ78VN4IiS1W1LKPYpZv//27t/FinOAI7j70AOgk2wCBZBLIMEsROrWMYXEIlNrKKgRdQosYlEYxWFWMcoUV+A55HTUwut1AMbQUEbQRHkGkVlwndhZDPuzjzPzDOzs7vf4kNIcnc7O/PM8+c3zzyPQXHPguLQBfZDXxUcftUhdIOl0ILGKwoxAwwGZSFLa1Q1uHQyQivbqg40mwPWCV7aeEXYoHj+guJ8zbXLly4n7ThzDGW7mHaNjnvMbuFNN5VLHRTn62DSEWlyzNTrTY65D2XFoLidoKWLh48MvFL2A4rtcV1sGMI+BSlnIp3540zQ66rU1U0HkcP3dcjyDE2CYn6OvkfTZSiazNZKEbAXy07IEgdVg6zQTXNC7j3617F7Yoya3ZSHcSED074GxTFrrafajNN+yKftKWWp6RsFfHceOMf0+1MHxX0Zk456CMYsyZThDN+R7xpbTmOXe/nfXkbbd0RtpNinoLhqg9a6WEe36g3kWOxhEPqwZRLXmHNIP6hpO1YMxpsG+JPs79Vdnzx0CdKUQXFf+uwGxT0MikMXJK/a0XVUpXHh7wuN1sbJN567d+9ere9H2MJT29ibk5/f8/2ewQ6UMZVtVQeajkHVzrnjnqiletppUDzfQfFwWebeaNKo87sMAtrYzCIFdmJloNbkO9JoE
mymmikd07DX7Xjlm0ekui6TLCsGxfU2Vqo6Z129rs2xEBA2edhA2aH8paxnuLeuXL4StHN81dtWMf2iJoPI/DwUB4vMMCpbj69pUNykL5Uq3GKWbJ0H/2V1IWskltVpVYOsuhuY0Zcu9i1x+/btWuWRPjaD0GIbVTUw7WtQHLNUW526335IWFDcNPyhLC9dW8p44Jy6/arTNvdhTDpusladTUqLgQyhc93As+6bI4xN2xgDtBkUU5YZ31OG2nwDknJP+Y/ZdHpc+73873L0fTSpa0x55vw2Lc/UO6keAk6yvzec/zCztm4fijfIimUgdVDchz67QXFPg2KWlUi5y2bxojOlPWZnRAooa3Wurq42rhzzhc1DKi4qRF6xYNfRsqdT485VaAeaQVPV0z2OlR2/z/15LmPZCjezMyhuY9BG2aLxiln4nw3Mpqlc0tlYvLr4cef5qgaOQIKnt6MG810GxcOvLbFhXNXMk/wBV+wO6H0uKwbF9dcJLlvj7vHjx1nX4XVsP4DjpLy1Wc9wXNQNDP5j+ydN7jP6NTdv3gya5cPnnfj1ROmu4WUzQVIExcM7su/ft78yOOJc0k/i1c8UfbjYoIefYVMiQrqqfTrGDeRDBlmhYUDerhDeli3jRlvFIC3kO1K3Xvrn0tg+FX11zte4Nq/PQXHIjO8ujmMe+yGj2tPQPkgeajCLrG5o2VZQ3Kcx6biAjTorNMjONwejTkkRqPE3WD+96hrz/3nbjfarrb1zUgXFHCtvVnDP8XCcdrLr/X6ohymv9M1DH7jk5zh1mN3lNaY8kxmFBo7D5Tl0qdNp6e/F5j/DYycmFqQcT/a9z25QPOcoANxsvOpx6uSpweLWOSpxLhJPINpqiKkk7969O/is/HM5Do6HSi2mQubmXVlZGawFBQbea2trUcfN64ccD79Px4p/cg541XLSa71q/jA4ozwyM2H43gS78tLJii3jfa2HaDS4z7jfqJP49y7WAW/SsFMnUE9xLYp1J9cm1UMYy4om1Q+gPFGuJrEmPwMijouZpgymuuqfcC/Rlzh75uzHz+PzmUHI656hn8fPsUxE3ifhbz579qyVeiE/V8W6iOvXdl1UVg9yjWIfxnLeuK78LueN4+eaxPbB+Fz+BteNMsTfoizz32PLTH4ti/cI3zH2/uBa8Z3ycsHvv3jxotd9zJANmFieIvUr4/PSD2l676Uol5MKivs0Jq2qS4r9q3y8yjG3VcfmdfvwOeFc5O2JY9Nm55asYFQfg/af+rmLc9zlNR5uX4ezl67Kc9/6e8V7PD8n+bno49ipr332PrKikyRNJCiWJGnWhWzGV3eDI/V/wkLVmujT/LaPJMmgWJIkg2JJkgKd/v30TGzwrHhVm7Fu+nJTdv/+fa+9JMmgWJJkUCxJ0ixjPVzWFS1rJ9lLpOu1RpXOuNeUWR6Q9WTLrj1lo68bKEuSDIolSTIoliQpkbKNOHOsK+m5mk6scbxj+47s+C/Hs0ePHg0Cf9ZEffDgQdCm6ocPHc7ev3/v9ZckGRRLkgyKJUmaVWwSXbU+LTNKmXXs+Zo+BMJHDh+pDIPHWVi3MNgs0nMpSTIoliQZFEuSNAPYJX3x6mL2/PnzwW73zCplt/dd3+6qDAuPHT3mjNIpxTUn7K0bFO/9Ye+gvHguJUkGxZIkg2JJkmbA+b/O1woKN3y+Ibtz545t5IzOFi/DBnduYidJMiiWJBkUS5JkUJwdPHAwe/PmjW3klGEG+MnfTtYOidd/tj67eOGi112SZFAsSTIoliRp3oPibVu3ZQ8fPrR9nEKvX7/ODv10qFZIvPGLjRkbHHoeJUkGxZIkg2JJkuY8KCYsvHH9hm3jFPvw4UO2dG0p2/r11uDrvvu73dmTJ0+87pIkg2JJkkGxJEnzHhRv3rQ5u7583XZxRrx79y67detWtu/HfYNrO+p6H/35aLa6upoRLnvOJEkGxZIkg2LPkyRpzoNiZpQ+ffrUNnHGg+NXr15lL1++zN6+feu1liQZFEuS5sva2tpgQDQO/9/zJEmaVSsrK9nOb3ZmC+sWPgmHt3y1ZTCj1PWIJUmSQbEkSZIkzQGWFshnkzqjVJIkGRRLkiRJkiRJkgyKJUmSJEmSJEkGxZIkSZIkSZIkg2JJkiRJkiRJkkGxJEmSJEmSJGlq/Af4RDlDl3vokQAAAABJRU5ErkJggg==)\n", + "\n", + "\n", + " This YOLO-World notebook is a Inferencing notebook presenting Real-Time Open-Vocabulary Object Detection.\n", + "\n", + "We hope that the resources in this notebook will help you for inferencing." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zw1OxP87zjCM" + }, + "source": [ + "## Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/AILab-CVC/YOLO-World) and install dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "rlsGVhscqjY0", + "outputId": "382bd549-11ee-4e1b-ec00-5e1401911bf4" + }, + "outputs": [], + "source": [ + "!git clone --recursive https://github.com/AILab-CVC/YOLO-World\n", + "%cd YOLO-World/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "uE1GmCSAJHXC", + "outputId": "43654a80-834a-4d34-caa6-00ae9a030f2e" + }, + "outputs": [], + "source": [ + "import os\n", + "# Install certain version of requests, tqdm, rich for openxlab (fix for yolo_world)\n", + "# Install mmcv before avoding compiling of mmcv and shortining waiting time installs \"whl\" file\n", + "# Downgrade pytorch version for fast installing mmcv (your on prem should finish faster with latest pytorch)\n", + "\n", + "\n", + "if 'COLAB_GPU' in os.environ:\n", + " !pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu121 -q\n", + " !pip install requests==2.28.2 tqdm==4.65.0 rich==13.4.2 -q\n", + " %pip install -U openmim -q\n", + " !mim install \"mmengine>=0.7.0\" -q\n", + " !mim install \"mmcv\" -q\n", + "else:\n", + " !pip install torch wheel requests==2.28.2 tqdm==4.65.0 rich==13.4.2 -q\n", + "\n", + "!pip install -e . -vv -q" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "v_Pgd1urgbj8" + }, + "outputs": [], + "source": [ + "if 'COLAB_GPU' in os.environ:\n", + " # Restart colab session (required for yolo_world to work in google colab)\n", + " quit()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZWq1gYXD2c4n" + }, + "source": [ + "## Pretrained Models\n", + "\n", + "Download Pretrained weights from Huggingface and set configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "LGuy6naerg4e", + "outputId": "c57e8147-c06c-4782-f5bf-6aa3e8ddeb58" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "%cd YOLO-World/\n", + "if not os.path.exists(\"pretrained_weights\"):\n", + " os.makedirs(\"pretrained_weights\")\n", + "\n", + "# Download pretrained weights of YOLO-Worldv2-L\tO365+GoldG img_size=1280 model\n", + "!wget -P pretrained_weights/ https://huggingface.co/wondervictor/YOLO-World/resolve/main/yolo_world_v2_l_obj365v1_goldg_pretrain_1280ft-9babe3f6.pth\n", + "!wget https://media.roboflow.com/notebooks/examples/dog.jpeg" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YECjGYE7-Ojg" + }, + "source": [ + "## Loading model configurations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000, + "referenced_widgets": [ + "0bc8d02b9b0941f8b38f822b8552e54c", + "c5081cf89abc4514b81b0a705850b26f", + "93a7172913a84728a2919fe8796567c0", + "c50ae95e956d456395d05f12367ff8e3", + "c80456ab37c844b1beb074e74b17d8fb", + "4e47a4bc196e44dba1d7ce4faa5b74af", + "d0bad9ce27a742a49667d1cd58eea350", + "32f222c92f844a8ea780960c0e25a64c", + "06c1c81b5e8544d8aaca394f2e13539e", + "81df29145f4449339e75f78919147899", + "614d44b9730b4fe9a01305ac6c822388", + 
"1745520fa3834cbf900b1646fec5d6aa", + "768b536c12f84b1cb24d38675573baa2", + "569e8aabbcd74e4f9288bdebeb91400b", + "ad5431bc98784ee7adcf489989aba432", + "8614da2bade94ade978fe71994c777fa", + "6113de583b7a4a22bbbbfcf9a0ae6ea7", + "164ffff1e1944183b01d8cf76541556a", + "cd8f2fffa9a845cfbc2ce664647acda5", + "32b452668efa4b61acacd04d289edde0", + "e46b4e1e95da4d6f924a851265403480", + "ee06192a75fc403ba6d945da2efe4317", + "828a59ea87f34d4f8be9fa6fb63fe991", + "0becbcf3af914252b73937ffd789c533", + "8dc08812835f40e9a85c73ea57710029", + "bd6743fab19a4056a741fb923f1d66c6", + "cfc1570a53d4467397583e5614f35515", + "52d5fe0cd2514f87917ab8bcf923becf", + "0cee1b12a94c4fdaa97d7b0e57a9d8f6", + "ef7a3e2a70624fdfa2d590635e962ffd", + "794250f1a0b44831864f487cfe4be7b3", + "4b48981f033a4e0b89b3dc1cd088599e", + "46da2b5501cf471a99f354f17e85fc1d", + "084791b432c64ea383eeb10dd912d27f", + "c7e34cc6b3b54c36933cf4b21f32b469", + "961b3186964b4aa694ed50e601ca6ea6", + "9c7aebef36c94f659420f35c6951ac14", + "0381e7fdec3642d7af08a11841aaaba4", + "b69eb52454c64fb4bac7c9f008241d24", + "5dfaba276a3c480d837a75767300e96f", + "309c33ce179144ac9b23d6396f2fdcd6", + "dc6812fd13504f6bae35d81aaf2593fa", + "f7463653c82e41b087e794191e70c43e", + "7c53e4cff8344da8858060970b931a80", + "07cb92c22899453291baccd1f9b11a49", + "cbc909708fca4191a80767479a9c9c55", + "152972aaf5c7433da0a7ce4889694cf4", + "b769fadb878c43beaec040a779ba9067", + "483f26b6d2e54bb581e8a6392b8e1b39", + "b2dd4e48fb974451979e37fb99bbdf5b", + "53a11753fc664f12942c0a5a8f62e695", + "e908586e492443c6a28ed16750df6748", + "013ebfb59e88443d978bb2a4f3a68f96", + "265d430fcc604c6984d70b7e63f11e37", + "f55df7a2f0474b5ab6d0a23bcedf8cc2", + "8a23897839594ba4827c5a34463dbb35", + "ce8d0eadfac444a6b88e0ba16ab6f3f9", + "2d181d3861c64d0c9d71331751de111e", + "fd9cc05ff50e4463b004cacd050b59c3", + "dedf6f98735643d5bb53ff2e874137c7", + "5dbdd01ad0bd4939937fa32eb32182a1", + "fd7d351c2a5943cd9934b36be67481ca", + "f9ecf05660fa4512b4ff4cbb9d30f3e1", + "898c2d408c0a4b34851f7fbf537f45b1", + "d5797b57dcf04274a5f7077d104a62b6", + "ec8e16b5e78d4c55b100090ee7e23ddc", + "14b64b065ef740cbbff5587f062b04a3", + "5ede178010f54c259c9802698a599664", + "225ca87fffb54bfa9514513ace1fdbf1", + "cd906068e1cb46e4b5b62fc6267e8e6d", + "0aafe16d6e6d4561932cba3bed69f562", + "a81ab5c22fdc4ea99ebe396d3b43c552", + "8841ee0d44fe4073b3dc5237c8045185", + "d839228be8b84096a587489217630b7f", + "b76961c341d64959ae6ed7ad40f6abab", + "df073637968a4ca499a861f74869d45d", + "2f5098940d27496983565ddb3ab158bd" + ] + }, + "id": "tFQXnK-FsXlj", + "outputId": "6e6286aa-fbf8-44b1-94f6-2ccc661d040e" + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "from mmengine.config import Config\n", + "from mmengine.dataset import Compose\n", + "from mmengine.runner import Runner\n", + "from mmengine.runner.amp import autocast\n", + "from mmyolo.registry import RUNNERS\n", + "from torchvision.ops import nms\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " # load config\n", + " cfg = Config.fromfile(\n", + " \"configs/pretrain/yolo_world_v2_l_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py\"\n", + " )\n", + " cfg.work_dir = \".\"\n", + " cfg.load_from = \"pretrained_weights/yolo_world_v2_l_obj365v1_goldg_pretrain_1280ft-9babe3f6.pth\"\n", + " runner = Runner.from_cfg(cfg)\n", + " runner.call_hook(\"before_run\")\n", + " runner.load_or_resume()\n", + " pipeline = cfg.test_dataloader.dataset.pipeline\n", + " runner.pipeline = Compose(pipeline)\n", + "\n", + " # run model evaluation\n", + " 
runner.model.eval()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7syIir2qHoc9" + }, + "outputs": [], + "source": [ + "def colorstr(*input):\n", + " \"\"\"\n", + " Helper function for style logging\n", + " \"\"\"\n", + " *args, string = input if len(input) > 1 else (\"bold\", input[0])\n", + " colors = {\"bold\": \"\\033[1m\"}\n", + "\n", + " return \"\".join(colors[x] for x in args) + f\"{string}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NI1DSw4SCCUU" + }, + "source": [ + "# Run Image Inference" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "ozklQl6BnsLI" + }, + "outputs": [], + "source": [ + "import PIL.Image\n", + "import cv2\n", + "import supervision as sv\n", + "\n", + "bounding_box_annotator = sv.BoxAnnotator()\n", + "label_annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)\n", + "mask_annotator = sv.MaskAnnotator()\n", + "\n", + "class_names = (\"person, bicycle, car, motorcycle, airplane, bus, train, truck, boat, \"\n", + " \"traffic light, fire hydrant, stop sign, parking meter, bench, bird, \"\n", + " \"cat, dog, horse, sheep, cow, elephant, bear, zebra, giraffe, \"\n", + " \"backpack, umbrella, handbag, tie, suitcase, frisbee, skis, snowboard, \"\n", + " \"sports ball, kite, baseball bat, baseball glove, skateboard, \"\n", + " \"surfboard, tennis racket, bottle, wine glass, cup, fork, knife, \"\n", + " \"spoon, bowl, banana, apple, sandwich, orange, broccoli, carrot, \"\n", + " \"hot dog, pizza, donut, cake, chair, couch, potted plant, bed, \"\n", + " \"dining table, toilet, tv, laptop, mouse, remote, keyboard, \"\n", + " \"cell phone, microwave, oven, toaster, sink, refrigerator, book, \"\n", + " \"clock, vase, scissors, teddy bear, hair drier, toothbrush\")\n", + "\n", + "class_names2 = (\"dog, eye, tongue, ear, leash\")\n", + "\n", + "\n", + "def run_image(\n", + " runner,\n", + " input_image,\n", + " max_num_boxes=100,\n", + " score_thr=0.05,\n", + " nms_thr=0.5,\n", + " output_image=\"output.png\",\n", + "):\n", + " output_image = \"runs/detect/\"+output_image\n", + " texts = [[t.strip()] for t in class_names.split(\",\")] + [[\" \"]]\n", + " data_info = runner.pipeline(dict(img_id=0, img_path=input_image,\n", + " texts=texts))\n", + "\n", + " data_batch = dict(\n", + " inputs=data_info[\"inputs\"].unsqueeze(0),\n", + " data_samples=[data_info[\"data_samples\"]],\n", + " )\n", + "\n", + " with autocast(enabled=False), torch.no_grad():\n", + " output = runner.model.test_step(data_batch)[0]\n", + " runner.model.class_names = texts\n", + " pred_instances = output.pred_instances\n", + "\n", + " # nms\n", + " keep_idxs = nms(pred_instances.bboxes, pred_instances.scores, iou_threshold=nms_thr)\n", + " pred_instances = pred_instances[keep_idxs]\n", + " pred_instances = pred_instances[pred_instances.scores.float() > score_thr]\n", + "\n", + " if len(pred_instances.scores) > max_num_boxes:\n", + " indices = pred_instances.scores.float().topk(max_num_boxes)[1]\n", + " pred_instances = pred_instances[indices]\n", + " output.pred_instances = pred_instances\n", + "\n", + " # predictions\n", + " pred_instances = pred_instances.cpu().numpy()\n", + "\n", + " if 'masks' in pred_instances:\n", + " masks = pred_instances['masks']\n", + " else:\n", + " masks = None\n", + " \n", + " detections = sv.Detections(\n", + " xyxy=pred_instances['bboxes'],\n", + " class_id=pred_instances['labels'],\n", + " confidence=pred_instances['scores']\n", + " )\n", + "\n", + " # label ids with 
confidence scores\n", + " labels = [\n", + " f\"{class_id} {confidence:0.2f}\"\n", + " for class_id, confidence\n", + " in zip(detections.class_id, detections.confidence)\n", + " ]\n", + "\n", + " # draw bounding box with label\n", + " image = PIL.Image.open(input_image)\n", + " svimage = np.array(image)\n", + " svimage = bounding_box_annotator.annotate(svimage, detections)\n", + " svimage = label_annotator.annotate(svimage, detections, labels)\n", + " if masks is not None:\n", + " svimage = mask_annotator.annotate(image, detections)\n", + "\n", + " # save output image\n", + " cv2.imwrite(output_image, svimage[:, :, ::-1])\n", + " print(f\"Results saved to {colorstr('bold', output_image)}\")\n", + "\n", + " return svimage[:, :, ::-1]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 979 + }, + "id": "-BL_keU8moAM", + "outputId": "78fe2957-1980-49b7-a64d-6a5d9f62cacf" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Results saved to \u001b[1mruns/detect/output.png\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAhsAAAOwCAYAAACXi7YkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9S6yty7ImBn2R/5jrsddr77P3Peeee8u3qrBKlowsIVAZSsjGbtFwkw4dRA/cseiAhBsWHTru0KBBi+pC103cgBIPgYxACKESMlZV2dS9vo9zzn6svd5z/BluREZmZPyR/2PMOddae+8Ze881xvgfmZGZkRFfRL6ImRn3dE/3dE/3dE/3dE93ROlTM3BP93RP93RP93RPP2+6Bxv3dE/3dE/3dE/3dKd0Dzbu6Z7u6Z7u6Z7u6U7pHmzc0z3d0z3d0z3d053SPdi4p3u6p3u6p3u6pzule7BxT/d0T/d0T/d0T3dK92Djnu7pnu7pnu7pnu6U7sHGPd3TPd3TPd3TPd0p3YONe7qne7qne7qne7pTOu198H/6D//v4fWcMwCAiEBEAATBTGmJY3LOICKklJBSwjRNoASkBJRXK+nGpszcf88ZnLnm6T+naUJKqVxjnKbUpW2ftX/MDJYbYGbknGu+OWfM81x5qfyAADjGTR7MjPP5jHmekTNwzgCzpKfpM3N9fg9FG74yA4X57ffLY5n69IYbydZ09/F4yYa0vvy3tantbaRz1xvsjtt+rx+QAQKIej6Vb+1vRIREhImAxBy2ZtSf1nj0/STqW/Z3lF/U9vav9hPMIAKYc/i+ltH/RfzrvWmaav1E/Id8gXHGG1DKSPwU+cMjnOghJrzDVXqJl9/9J/juD/8Mf/Gf/VP8xT//C3z59Gv80Z/8PUxXT/D23Xt898NLZCR88ew5Xnz1Nb759R/j61//GlcPHoOKIpxzLHO+bvw9+/18Pi/KZMtvP337Sb0zOAPMaZG+/d14YZxOTdeutfk0TWDOnQxGMte3IRdFt9Tdnh+w6KtmB4RUj4s+zqHtGvHelT9RJ5/znMEZUmfMYAaIEgAGaAZzy8vWgf412QUIvMjftveoTiNerWz7e779fL335eGgvdH1/f/Zf/8fLPjytBtsRHRUEUeVIjJEYM61YrRhfAcCiu1b4UPfVUXCYGnzAwb9NkkbXDtkdP+e7mlJefsR8+xWVySigui3++xRAOzfs/1xLZ09+qO9T8WQUc1nz59Px9/3BsmXJeIz0QmUMjATQBmgM4jPuD6/w4fr9/j+hx/w/fcv8ejhE/z6j/8Uv/7jP8WjJ1/i/fU1vvj9HzCD8OTZCzz64imePX+B09VDAISceS+mH9JWne4xVkBhw5RfgJ5t13bd5F4MLYV8UJce1aJugdrykvm6BEf2U2Q8Bovj5HuZtzLW5VX+iygCLPIsL+4pyNC/mg9yCDR839rqnyPZj8DGUdJ0Rv1jRLvBxm16eL3nQqWCuQMZQ+HgJS89Ipc0tAH3Kr7bJNuQfUM3o3C0oe7pnmLiGD+QMaxQRbaRklFk0ffoee91RUBlDbysyX+vMCcQiTGLojZrUYkRELFKPipDaDBBSJjAnMBZjMOUPoDyW7x5+z1evvoWP7x8iZkTvv76N/jqq9/iV9/8MR49eYGZGY+evsCcgYePH+P04JFENE4nzJmRlYdhjazXX6R31urEltfXNxODNjhpdadc9/mPQEHlyfFhy7FslyXI8GVun5K298Z9RNnn5z8j3ghArmDGeP71rkQ1UlKwkZFzczg1XRtVs9GH2lcdMBj1kzXwGMnAqC2iqNmoH4/s7xZ91MiGf7citho60ns63BAABR4Lh6ar4TIgIZGgUR8qvSvygtGEuwPn93RPt0bhgAsDqQANApCg38fAAYiN7d4+c0QhbtHSo8xDsLHWtz0gGXnRkfL16eg7OZ+QZ4nKJroGpYx5fonr+SV++OFb/PjjKzx69Bzf/PrP8PzL3+Lhkxfg0xWmNOGrR18gg5A5gdIEpITMhAzIsCxLKP1ohMOHuiOjGZUlqou+nlKXrk1/STmUgRh0LNsiAqstor30xL2c1nvM3fPR34hHy8cQfBSg0eyTTTNBopIypEQkn2oHPNDt22IZhbdAydeTHyLxdRiB662IU7uIjpfI1kZ1t0afJLLh0+RSkbZgNkrRFTLoUPoMEXXjSEUbVJQ7qqw7KxezGSOU4SJL99GNe4opHnIbEaWRD9q8Qhjfa036RwB+9KxX3tG7R6Maa544jOcXeYajtPRZm++W0YnKRERINIGYQHQG4Qye3+Ddmz/g3dvv8OOrH5CZ8OzFN3j6/Nd4+uWvMT34AjMBTBNSkvA5mMShYmCGAA0GUMIlh2hPPa55wJrGor1FcdaogpUjwBlgAvz8j
iiCYL8zluDW2wDNy6frAeFWZMNGNNYiBTat7OxSrRPSaCJDotXyWV6uAI0hgB/oDf1alCEFQGOLX09H7ZudT9KKSYCLJkVO/pH8bhTZuJQWCogbctX7zYMRiCU+mYQHfNG8QFiwkigek13jLRJiX7nMvODbp6Nhu5wzMvdq/gjQ2KcUj2qona+06Oid0T3YCsh2jxVvBICEbIMkpJ+pF1mMBS+fWcvH57XKsvO87PetNl4DLH3aqbsXAROb3iJC4oCGz1evjQwxQEi4AqUTCG8Bvsb5+iVev/k9fv+7v8DbN2/w9NlX+PqbP8Wjp79CevAEmR6AJ/GTrs8zQIyUTgCRmZlTNAnHUQ1ff75e1+o4AhxRNKGXqVR0hI/yNCESzz2uY/uOdRprWbgZsy0nkAM9G4GjkZ6MjLYts40a+HR8BIHK8BKTtJP0r2VkRaNwNoKzqAP0bTJqe1/mCGzZtHzfiNplBNT6tlsC/lGee+ijgo2ogqUAItitHBLBSHSSwucaFKtK06P2kdLgzMC0VEyRMrIgxaI9j0hb4yzLFqFpIsJEzZvxaY4Q4m4jfANbveb5CA99Ofemt0zn4wOKNSP0KeimETUv5+LVywoTwlKR6aOk93fkYZUNkYwtr/HjQ7ORDPs0/adXfDIE2kcv2vvLlSc+73FUZLvs0XfLCxFAWQzQlID5+j0+vH+JH1/+AX/4/e9wdXqA58++xuMnX+LFV79GevAYmRJycaCQTmAAc9EvMnxSM8UaqyOeRt6vrb81Q+EjQ1WOchxFiJ61dWzBhRpyP4wg+pEX7RjJUrkTljtqf5ThKM+nnbNhycr4ZsSjAoKMlICcBOzL/RblETldyrpNKwYhjT9bX7adIwAR9YU9QMPyY+2f1OGyTTxvW3Vm6U7ABu9gYOu+Cod/XhqyLCsy90KF4gz7CJDop/K9xxOTRLAQ6gULHWoM8lxB9Gso9p5+rsTuE9UA1egEVJmVzxXFIs9SlxbhmDxtGZq9aXh5HhlANVB6L+cMStwpb+9sRDxdAjIi6tORkHmer0H5Lc7Xb/Dmx+/w/R9+hw9vP+DZsxf48svf4Muvfo2rx49xZioTCkV3i8FqgUUSBjRp8YgDY+l59QZgrc3XyrT6HLFOg6yfHV/etpt7IwOrn5kziOPIypKn5vFYGe8dP6NHjd4/pM9tjsE7qSx5TTWTwneSa4JF+uE9ldlRmkf4iX5HoGOLfL3Z99nIor/v8z5aljuLbOwFG8waploKGLP9bI0ZUQQe2udSqeWcO4+pgg1ggX7XGnBLgXYInJfXIs9uLf17+iWQSuIyJKqkCm0imQRth1IYJNFAHUc2SpjM+3vkaq8C21WqQT+Joh3xu/GEuHVDtY+ices4PUJKQOIZ8/wOb9/+gO++/Rt8/+13eHD1GN989af4+us/wRfPvgSmE87naySkorfUaCraoM5rp6LrRq2xpvjt71sBGq24dQi7DmW7+yVnWFaiyIPlMc/GWLu/iFKJFqwZ3ZZfz8tNaAyaJAMBEwm55kmQ1SjtT9+5FPj0+fZ8RaDDPrO0sw70udU5FbSYaef+nr+2lz76nI0I6cqs3phpCzT0HeamPzVNKxRR5dn8lKwHVf/c+9tAYxy27YEMIRVfJlK2kRBGQCYqxz39zIhioNGtxy/XppQwkYwUdl4OATm7vkaAH1DZCzgWLB6Maug7I9m35Qpn2buJoSMjtcbXTZR85RMMSgzwjPfvX+Pl99/i++++BZjw/MlXePH8G3zx+AXSdAVOjNMDIJ8NgpCxCeG58iP3ibOsVFkZ9NoT2RgZ+Sjqs9b2URShPa+xGf3cxyegendUwgERqq4dyWwrX+/tX2rglzKlQEaH8uWqyCsD1RHuwcalkQClNSMfRSlGdsLXhwKNCGhXcLnSjkfLdTdgg+NpkxHQkO8z1kSvL5SM9QmiJKRESGmKUa/+aQXbjuHACHNZ5z6ITIwoalg/Rip5t+gN1/qxE2EbadTF5n8PNH5pJOBADGkZLpmoRshqqJZIIhvG4Hrl2smM85y35GqPp7y7RDuABrCcQ8DMoKT34/Fp/Rx50lvU6SY7RGuUrvKb5/f48OFtmafx1/jxxx/x7MkLfPXVr/H02Vd4+OgpZk6Yz9fgSca/CQmls0MaQSavC3+q/zKIkzyyUuUR0CjMCr8bDpL/PtZxGn1QQ6/eezb3S5sggYphHTl6nV5FjDbClREE6PDENtBAze+mZOeb1PQ5A5ghS1pFp1Ppp7KzdYIFG55q5OBgn4qAw6jdvP2y+QKQHU9ZduLOdY5I4YfQhrfMpOVmr8xEZk13ZxluFWx0nSC4b6ueVYIBqLcfjQl28+xJxFqAAWFKBKJJ1qszQ7ZunkruZYMwkry4eHWeL/2dmWVsVdBJyY+Quax+oeW7VD0QVmbNdWBKU4cO55JhrhFV4auO6WrW1YsSxKz5aLPqOm/2lbWDlNsjVOybIw4T4vrPPW1RpBRa5IEAlp0xdEVV2ysDVSIIANHUy6ca5bqtcq6eYZ/Xko8lP0uPyV7fq9S9dzxStjay0eVL/YqGtXyOXC+5Alz6F2UwZVltwAXE8QTihIQTEgj5/D3mt3+FH7//S/zw/Q9geoLHz/4ET776U1w9+Qr59ACZWDbqyhkTyQREGwsACHNtMe3RCZk0+rngsL1cQAULZgFn1Z8qG63Dqueq1UaUVuvf11Vv0BrYq/jG3CKXxsjjrkmVFR09tNOIQQ/+uNRJFEvp+Sn58rp8jhw47/kDbkIyCJQmwRwEqFUj8QCg4EOHfUzCUOCGWkZ1h21FxvXngYUts76fuRwHUjYMq8OOJZtcHPW5HPnRgIPWgZSBkKRUpU1btL8lJ8+Zzx20G2wcCpuOrpNrAavQso5XGuRXaqLzXKDmVxCkRgxKgLM2IlEqKB8NpAx4zKyTQ9ud3tNBLwzaWVl5bFxpoxHKmuliBJgZSZdJFSSZTV4CLqhGaNrclKYYEiVBolon1XoMKjwgNv/uIa0/n4W2VZzB2JAsHv8JR2puy+NfppmMdSEQJ0woYEObnDNSLrKRTlgep1Fkj7NCkpImQyMjfvhuywhZGrXbVqRkbcjD34t5WSrgo+HcZbrFgQHAyAI4ypkzUxKAgWvZGA0zI79/ifdv/gI/fveXeP/+Gk+e/wZXT3+D6ckf4Xz1GBlcAOIJk04Kl2zEBaoTxReMQdq7tJuNALWiC79Zw+AlEqJA1KjY6pVyWwLtV0jYOom95ML4ou76Oq+gFtuyYaq9gAu2RSv6VfOXO2L3e73XZKQBNGYG53zI4RmBjZZ3boBDXclFvfgzuGAsjqCkRCi7xNYeCUUt7OrH82Drr3NMDABQ+0WlDlXOcs4ypwTAnBnzbNpKWVBnHwBRBqcy6K/9Gag73NpCHtGBn2SfDU+jsBtgDLH51OvNEJtQln3WRDiAHjR4odL8Du00WkCD53mk/FJRGjVeYTyVZXh5V9Xd08+W1j1PK557jGyVrVFuA2NzlPyBU0q2X9m/6NmPTzIPQ7xTcWCoRDSpbFQFOgOccf7wDu/evsT3332Hly9f
4cHDp3j+4it8+dXXePDokXiBJYxE1JmV3jMstPDiedkOa6Fy63EGtrxcp/D+TWgUHYt0nwW2/bs21m0MqInscLGiOc8hSPXDzYA4rnC2wg7RxRHFZTlCENLgBtb66Ch9n5cfUvEAejVteSFMVw+bq3wTRLarvXPvwIKVHatVgLA91uiTg41RYayHtBirc5XhQUj7LWAAWFaUhsns+u+I1pSwBy/2036P0+5FtkVz9B3N4KdFow5wT7dAbDzA7nuN6S4o2ldgRHsjT6O+Is+6FTDFo5PACslup+oh0nK45Eh0ZY3HEY2jJToun1Bn4jPLyCzJtE3gPT6cf8CrV9/h++9/xPXM+Oqr53jx1a/w7NkLnK4etJCzvl/c4M549hwFvGChCyPDJ/pLtwHodY0MPZlm+Ah9cmSglPphFMtPb7x91EqjFZFx87qbmTWQsHjWAg7LpyUiGq5MKi9h1Nci8nYssieRzRjV4R7ytrTKY4rnco2Ale2LfgfWo0AD+AzABrCO3JVsIy3COYgnZcp744bMud94K6VUNpopeqLZ/qH9XAMZXoma0gjP0HkcpXOZIaNbcDI/DZFF/o1uw2v+pVMHbmHC5YPTQmMAf3coVsP90VCJj2hQlZPI4z1GtwE22k6QJ4AnCOhgEM8An4H8Dufza7x5/Vf4/e//Ei9fvcPDL57h6fMXePL0OaarKzkiHm3YFAS7s7X51PKmZVPwui7R7+2vTdZcGNZuVRMPddgRWjeMy2c0/zCyoULcfaIMR6PWhc3HR7hHhntr9ZLnP5KhMBqnK08O1Je3A/4AwIuBxUDXalrW0agyie3hmnav2c9cltL73WB/cmADGEc29J5eY+ayG9/yuWUDMiZ1q3xornzKmSW6YUsfFbGfYaUOQNJIYBYhO+WMSkcyZbyne1oQ268mFCqhseXjB5TXerRi3zs1yOyUahS5kL9mbG8KOG5EZShDhk0ITJNMCkVGAoPmM+br13j/9vf44ds/x7ff/jXS9BjffPNbvPjyG3zx5Bmm6WRspp0zoJFV+cXd9fbRMxMbgBhw1CsthVqHbe4DueGam5A9ndvyQ0iLa5Y6Xa3/OqCh25jr1DbOYuhkwuU+A6d6NDLu+lv5sytNfJTEriZJKclOoyVStZd8lMSm6SM40fNrZQ3j5c4GVrBB6pQsgUZEka0U+6g2UvMbsregi8DGlmIahc68QrEVHIVybBr13SANFZpleEwUmp+80/JK1aNh1j0z0iKqITzq79bJUxDyXHTAoK6041NJryF/dJ6KDSv6tG9K2r9vm7Sje9rD+yUG76dMew0rM2Oec1neWq/KSikiyNLJ/vlxPgzO2+PEVlFHacfbmMsKmPa+XE0V8BsetP85D3QPRZFD+9tTFGnp7oMgJ5ZqX871GjAjzx8wz2/x+tV3ePnD75DnM548e4EXX/4Rnj37Fa4ePhRYwjrJrk3oZM7VG+7ruD9o0nr/oyiGrwPRL+ND6Gz55VNKa9/X+5G3Pcoz1NMmYrOMeMReswdIqndz7vOQc1paBHqkV2taKclKQLI7eS5X4dilrZbPUTSkyvRgczlfVs/jApwH9RkN8Wg5vLy3SOWgHqzdrOOZLW9bD74Mbe6QBb8COGw0Mi9npg/pENiIogiWeSsIa5Xu37Ofo2dqGrQMHI1QoeN+kJ9tAAC1w5g3qUUfOuEwHuUIKMUemwo9O6Bs0rpD+9rC3f31o0Y9btfLvKeRMYjk6SZ0FNDctae9L32dnGbeg0Y3WFZ5raRv+49G0Oy1tfqNPDHPe+ubufaV5lXrCqu+LJLeDGDfuRTa1xoY78sXpeGNy2pdy17ispIBZzBmTARcTRkfPlzj/euX+OHb3+Hdm9d4+OApnj3/Bk+efoWrR1+A6FSiNA7Bl35sHaJeyaujIVvP987Muj68aX+I9HPkGEb5jvMOJh4O9WIDpPaddooqu2fWy6u8y5k1PcCIwEYk/xHAtkDD5xXlf+T6VkRjTZbX0veTtBvgaOK5BdBR39D2Kfaq5qefdwQ2QnYcwLhNw7BFa4LvUXr0nj+YZ00wonLVPThMuv77CN0yq0fFXeeqihi3FfS8Gxoq7p9nEOIzIRPtKkCVEcvaUZDklU+kZKMDu7rniEAkC0iXQKYB6SX4CErqL9Y5Ffu2FV8DlYt7jAYUWDY8kuWYM/I84/rDa3z/3bf44bvvMGHCsy+/wZe/+g2ePP0S6fRAhl1S0q1AGruFclZvHV0fV29enykM7HLAbHm0EE2HlLbpyqzGO05jBAr8M9GE49UowxAMkuG1dwJb1EbeDfbGWvDZRTFoHM0Y8egjB5GhFxkH1pScB26jvDU/n6ev3zUZiMoWyQ4Rtc1Nd6RR7qw+b9toL90IbETI8GMBDSVbuWHl57IUjfR5+csz16ERZZlXhWh5T2ap6/d9S9bavaCTd2HIzxlqrBDRWl+8pxuQer7lV7uOfjXKTaIxkdG2/TwCJTUEbxTxyFtuusL1vS1vnbMBHMHtQf+PnotvpOJVS1Qj8xnI17i+fotXL3+Pl99/i+t313jx7Dm+/uqP8fz5N7h68AXOYGQW1mp0PZPoCw1Q5rGhtryXX8MyxqT17H/37WFBSVj8Qb1Yz9ufhl3z2UEWRKh+86uR7HP6XY1y20MkjqzpnAoFGZPbvGwPnxZURJEfYF0rRwB9Kz9gCTz2Rq4siPfXfRpEfq+WRj7/lpAazfabiMp1WSp+RFJvJbJReftIgMMrPB9Z8SCo224WBWwUBTDPfedZO5CpE1xiJBZFb/NueSx5Uz6ISLydoqiqt2MASMnpNqrrzijqVB8Za/7iqcqOjmTcEGjYPuTDyn5c3+ZHBGPxlt7dMvqJLp3NJbqkE6i3y2A/dxETUOdcZGS8R+JrZH6Pt2+/x/ff/w5vX7/G1fQAz5/8Ci+ef41Hj5+CcQLzLNt0Q9qgharbt6aSbSDbj3drfbL5vm247KmibX6Clr/lrFU7imzYjavsdf20kWCljj9CNUJr1Bu1PprrnkTbDjxhSnptKVtE1AONEt2wz61RvEX6qO7XIw1RHxlRBGjU8EeTcGO+Y7Dh+YLOI0J0r3+3f72L3VeH50j0TekQ2NgKmyxDp7dLXGJu2XTesm+oMdCNFxVk34E88qtLaTcQpU8rw+5LGhhe5ZD7OhFhorpzadHUsGoKGIm1KDUNfhg7s580cOIyqMv0whcc0WjRFdq273vS8U/8bNFKVK6RcpZrCcWZrw6GqVcuN1k9lt74l64y5ITcZw01hJGNpnxTEovKesibERprRGKAsYxs2PfW276dh9Q4b3XVb8Nt70WAeJSPvEuZAVwjz2/w+tXv8fLl73Ce3+PZF1/h2Yvf4PGTXyFdPZRdW4lwOp1wzrPhRdpKYIMHGp6fXrlrE/TtGJXHRjRaG6VkhrtKJVsPOCq7DYtHdRXpzNDR6KJtHmhFDlgvA8x2qI669kypXdP8rXHX737oROvHliUqY0R9GXtTHQGFUWQkoq08o9GCZeS8H8r3ktZJm1M1o8iN3qugokt5CTLuDGxo4pGg2UrxCDlKwzN
r0eAaMsxcwpaqBMvMh7o1eDGCVW8ygAwwtc6SmXHOWYx9zl0jzSMhgOnmxcInkOTLRrgByFkV0pMyCG0v6clsM9yQq/xuISuClCNTADxUt6sXVcoYjPIMy8EA0gGbXttB+VGvaTQhlw8E2EQz//wp6A7UIYlWCXq4GnXvyRbV9SlKKmxmhVZLqscOTXrrTA99ltArJED6kCrtVISLCsjQ1LSPogGU4psNCy88cf1MiZDNYPKqkmYCc6p10nLUybOSb87tLBkq4KQNgerKnWZ8AchJqzljSldI8wNcJcb5w2t8/91f4uUP/wyv3v0NHj//I3zxq19j+vLvgB//CmfKtb7m+dxGLLQ4ZVUvZwbXc1248gsAKU3GUxT+5FBJ6/HrcENvPppRzhLJUG+epL1I+1Wt02LcTTSFTGUSEeazm1iIfiMnvR7tPyF136+KKg3XPkVF4zRNpT3LHW4nf+c8V9lgnko+srqkplF5ZExJ0kxJAVf5S8UJBcA8t/2TAOgkVN07AkY+LDjRaJqVyFTAj+7Q6YdwpDyXTeCViHc8h9BHmVTmF5EhKjYJgE520QnkCkht1CQGj4y2wq1VeEq0mOd4hA6DjT2RjREg2YOKuvExj7jKZ0YRMDW4KAqyPm8aKTMytaEOPXAtcxtD3Vtt2tWLCihXVcDKNe3cFbVbz2IqHcvaFoJuYI6avuHfVSN336les2Co3l8BTjGN2ky1AqoVG4UZ21JuZzgGmZKmuUF3PTx3k+GHbWpgbZlvqWPqr00pqNuVvmFlv7/nv3MDDYvu4vIkK/EA6twQqgq6SeAgDZPPIsKRk3t3g7j1PeFfpd4gcHvPdp6N5qWUkPMZmK+R8jt8ePsab179gFcvvwdAePz0OR4/+xXSF8/B0xUY70vZ3BAqL3VYBTta3KzvqB4sL6KPMAhYtIarGQ3bLpSase0FCaZ6nS7lNmHUG7LqndujHsz10VDVyAvn8l+LwvTzNRQI9HVmwEMBDnbFqR/qs5G3jk9TztpAlb9e9vSZ7lkaR3BHOvBSsHH0HWYdhjeTNblfnebby4KV8Gh57uXNrv5q7C3rbg8dPojtpkrZC/ZdU2aWcw4qEMq1oj3YoBXB2ktrgMrm7alTWixrDSKn3yNteRdhZONjG2g1TYnI7AHxcdr5p0qRZzFSYlvnOtwtYPr0FA3HrD0LqIde+s0a+CaA+QzCW3w4/4CXr3+P71/+gPfvJzz+4kt8+exP8PTplzhNQObrEB+zk/mmM5NidPGGE2CPTDcpVCPY+C8nKYVtSzVtKzPjemlDNl4Pe4+aSCMeS8O+JYOR7rPtRsX7JkqdLvTDLP3yVeVnBDL6IRTfn6Jo+k10k887ksePofeitutlr9XDWuQlChCMggJezvbqnd1gw6+3X6NR5t32qR/LABVYG4WhAA2rmXNJbkFhR8IsHxm+2D4axIUH6esBah7keYTtQ3Vv+PACvFA0kAgTV4/CAq17wLFFvUcXRQhiYzLyrn6O4MMDja0yeg82epwYoAwwrkH0Bq/f/Q2+/+Ev8fr1W6T0As+e/RmeP//bePzFV8BEYDpjtJlWVO8614RL1KOecF2f9cp/2VdiAMrlrJl2f6vdw3C804n+MEqiNgGTSIYQfHr2+SGos8CgTP60AMG/1ya8lqEb4gWgiMCG5hXJyG05u34oKUr/KN1UR/p2VP48nyOg0csCACz1vJ3AejQAcTiyMULvl9BRZHQJLYWvIWQAZf7FIn52q3kL9bOwqYYUtU7bp4bvNOS9JF7+pOWjHD07SGJYBqDsgDj2uP0bVHYounRs75dIneIkWhVF76lFyu7nSD6yYa+NngcAnUEk7w66eZ5B+T0+fPgeP7z8S7z88Q/IOeHpkz/Cs6d/Cw8ffoNpeghMM7iuGIk9QSUb2bC3KFGNcvSAXN9To68TPhVQLNs2EdXD7UZ8RPUSGSbluRlvSbuu8pimhVHyRj0y5t7gSdSE6vwvQpm/ory3iqg8UJJ6tBt12XRHG3fZFYBRmY/aIAvI9gCNu+yPGvFZK1Nkr4+AIw/q9FkfOdpDh8GGZyxicsso3XYDbBW2E3agHh/dFNByUmoUUoqQXPT8uvLr95Vv47YGaLB8xjtDRuMl5flwKCVkJXw2pt7TXkPI+rzWbvSsR918jJmfJa3Jzihq4Z8Z9alItvcSM/oJpIP8LZ9r4Vj73UccttKPaLeBAOowit3cyoKRCTOuz2/w5vW3+O77v8abt6/x+OFv8OLL3+LZ099gmp4gc0bO7wEiTOmBcRCW9e/1SsSqRjUsp62v2R0tx/qUzByFkdHXT42mRGDD9tVqvKcWVdA/X/d7gE0s02yiplS3ue+HmKk5URaMD6Ib3ihaPiO+1jx8X49ROlvRgijP2yS1Z3Z41fe1qC729E/7jo8aKYg7SrvBxjRNNRNbsJEyjBDUMspwWThLxu9IJg05AauAAn3eNS/Nv9xTT3KNh1GHRMC734muF1iv+FXxLSe3MtDPbxsIiH7mcvKnr09tN02DmWXG+5Q2z6SRd9AiJ0FHXyovQHZgbIqBmevs7UU9/1JwRgWYY8BaH10YofZcpFT8PjJR+vb5FEw+HfGWcwYjL9o9yidKa03pHml6r3OWxnzFAWCuy9uJ6iT9lh4YfP0aH97+gB+//xZvX73FlB7iydNf4Ysnv8KjJ1+C6Qo0MU6nGefzNWT+VV/20Sq8oio2nJDW7nalml4fvTulCXB6IgIaMkeNyxBObyxs29pzMqaUMJ2mDmSorO01wpq/1xnttt4DAMI0ifZ2NQQwy4qbAPR4YzgifdaW3Z6pZevMfvcHskV9TcvlV3r4eRL+esSjT1f5tPaXAUwpdXZunuchWBj1kygKIt+bzC7brt9xdS/tBhu3hcrWvLA1Bdzz0jyF5p0AUONXlzChoudWkaLkbGXpZEYA4+Wc6MNyzIyTa0RtBF+eSMAio9HlRaiFjDqvf9cKuRd4zyOhLTvzfIYddrCudgE05CoIaeHKRUCzpIKlcvl50siz8O0zetd6XqN+tGbklaJ04nxV4yyV8lra/vsIcHhve0/aVvlGcwsiXqiLzC1BS+Iz5usf8e7V93jz6g14vsKjR8/x7OnXePLsBU4PrzAD5bTpJEsOsS61fT30gINL5FLIpkLCX9FTicwwwsiQ0lI2RkS1DGO52ZK/6Fokc2tgcJS+fI5lbI+T6/O3fz4/r69vg3w9eadadbPOe/F9f802dqDN3dPP6CyjyEHwdWU/LaCwnx5wbNkwTxeDjS3h8YjptsBKSV2UFPoKqpUAruv3Jyq7++mzhW2LOMm8b42kR+8WNESdSBtDT8TUPJrxz64DSFniEgrgUHBA3htlyIKykkZKikYVDOWyxFYiKLrpUUoEsOxgGgldqIjQd4hhx2BRwaSRJ/NM5FX8ksjLy2r/iQMbuwHHHoqAxjJtAGBwAdjzPA+Vly+TV/K+DwHFW8tYXN/i29bFURJj5ncNvsbbN7/Dy+//Cm9+fIsr+hIvnv0Jvvzqt3j4+BEwncs+EAmUE+zhcTchqcdlqJsIZQhD92
/YM1dqmXZLr/TF8MiFZdr12oEsI+MVnaXTP69lJ6iOkufG+ViQubW6YgQ2Ihm+DfKAYMSHtVWjvjRKH9BTcFvdWZCRc+6i2ba8e8sgf3paejw8dRRoADc4Yn7sDTVmPLq8DWoeNPSgxgVlbqfpZu5DzOQmiHqi0T7xet8AiuQQZoQE9R1mRuYMXaLMZRMbfS8qp7wrf1bheFTJLJuUCXIW701AUasfIt2BEbX8REsFENYL9Qpg3WBSZdrer2cc3MBA/nRJ66v8GngSTWZWvNMAcIyei4kX748MOBFXOfWgwUYVRulZT86WVT85p0X/3ZINr/xG5fT89p/tnfP5jPO713j949/g7avvQfMDPHv+W3z57E/xxeMXyJRxnV8jUwLhIQgPyuvnYZ5LnldLZOpMgUZb/aGTMo+Qb1N73f/20SF77xDaCHgYX1eZysWgqfyvt73Kk9etes8+Z22PHy6+fed33C+P5LNH9vWTWCL4uq+LtTsjcLW3bwHav4/Ng9miG0U2tpjfAiSXUg1AmFgm641ySVnLptOllPrpWA6dkTy4KIOSBRqn0wmTARseZCwUIQGJl8uGRtQEqOyImJKEVafJOL5mUuj1NZi5ep+610WkdHw+689I5MeXyX5qeWRPE8jEVmrjsHDv/xIBxwho+GuiSIAIcKwZ9nG+sTyvvd8UZX1p8b59hmjZ172ij8CGpLsvjB2BjG3d0g926ByvNuuVcX39AW9e/4BXr/6AD9fv8PDBV/jyxW/x9OmvkabHmOk9Mj4gna4AukLOCRMmAOeb2GJTJtf3SIdPgGmST63fQSpNVMwjEZD1XveoDm+qs7eAsPJlP7Usa1nbiFg0XK3PeFBh5dBfj9K4lNYAvL8W9cs1HuJ7VCU80ic+ghLJ0AiU+nR8Ge31vbr8xnM29gCOCFFegi4VDBDLIWhUUIWv8DZngzCXOC0B4DxLyIOpHWhXlV4PWLp8XcdNKQnYSASqO6wVYJAIsmGPpkhVWbCu1CCAUgFChf+Wb/FuynKvSQGHzgzXLYkBOZ9CtzbOkm8mxjwTciLM5ZS3XMtp9FHlr03W4u6BZd3rK1R0dSpDVFS2Ek6lLNKuLMM14Hp8Ruwr3T4Y/TypDkbJT0ZDINVit3bSVc8VWC/AitbzvjlQlo8KVWvUS78HAIQB4tavdAhCh+G0nbUsVFcj6Z4yCeAssqhJcoZs9t/y9AYjomib7KGx5FxUsQL7CYwE8ARgBvMHIL9DPn+Ht29+h9dv3iPzFb549hxfvHiG06NHwHQFmX50AuEEcALyDFC2ldZRhTetmkufQL3Q9UFiAzjQlppS+xOFwSv5GWNQr5UFvwWLpJIBpwr/S/01vWf1qN7T9hgZFi8vI6CxfF+GhyU8nSBHPiS00WIHBMzZDT5PH8mQizIh1k6ozFUHsujL0ocmoqhqpQ5qrbbv0iepXreaTZY55xJBl+HsROJkJkr1XK/I+DNzPQrDcAEyskTm/Zl7MF3rytRzDyLssJOKkwWeqbY7WRDbWDHXepnbQzcCG1tgwZ9cd2S8fojymOvwhT7Rd5Aqkfp4VeIZqOeUrC0XsjxYlGdRIhFhKlJQFTUIhFTXxavRXXTQ8m9VANzPlpdTC7l817CqzgDOqh1qfYCBKxLDPhcsknMBPiBZsjfLsXFZgQ/LKgMrKdSYM/VddWIBGPJd2yBRMu9IWWeeBeRwrkBDgQhAcl5Mbbz9YOMnHQ2pB6aVn+Vf7eSiBAk8A6nIUi7yk1KRdwc4Wn9qitaGQVe9YctaZWzphQkkSOatyZSHKrioirhu3y+GREwegcyGdtp7W/RuOalNv0fGbTvCwUiKf0h4ypQAXCHzhAkJ8/wW+folPrz5K7z64c/x7n3CF4+/xLNffY2HTx8ADwCcTsg5IXECZoFQia+hQh3VrjpErXuqQ7EshyonKrymRJgmOQdEDF2SPzWkQY5J0KDN3NRlM9J6bh4S1X1zlL+kxrYgW3V0/M6Ytj0ibzkycN7DbzcqQxAfUOSlDi/bIRUuq24cjUAQifcFmbdmV3GouqQKAJmp6reubYBmfPW/rgwx2BB9WcNRxT63bQ2S2gNu8KW+CGVQPYwCLpaYQu4wuZODl/bLUkqTqQ9bjiIr9Z0CqAywIWUxBBn7dPiNj5gf0dEQyxFaK5oPM49eGO1mOgI5y45kDIV513oELoUmcKnJlBp+e1CjPJMqwKjHJgcAqX2KcBATZsziMelhRwXBShBGTzzY3y62OHvC19rB+t8oStV5sbu5+GnTKEzqQQKA4oX1cx2EetnSyELOy4lnl0zEDftOyXdJ1CCGAfb2LAW9JvfTol9EgMEbL/t7b2QDUFtWQFm7CpkfcEaaP+Dt25d4+f3f4NUP3+Hq9CWevfgGT1/8Cg8ePgSXCZpJQVFiJMzFiEVmv9Xf4qj24OnOcKcGulJqR6VbMDaKBFM5LMyTtn8SFKOZCnhJzXCuAbq7p34ycq9jqYKwNQrtiwK88qfDyTphXu2stktm1HbexfWgH0c0arvl88XId0kbyXGPK1hSLDJqyz79/pm1CJQ66KNI1SV0J2BjZBDvkjyCHo1RbfG3p1K1c9jn/Wf8Tj9+5j2F9qzMRteZ6KPd8TreJzKIXsIQzO3QNzndsFbAZhkbM6iofquM6+mot8cK+n9xtLcfZMiQFFDkplxXsFZSC+vQAo7IaGzxsLcvjOR+T5hdP0PDHHjKtjz23TX+mFKJ3NnIyxl5fg/md7h+/xKvfvwDXv3wA+ZzxrMXz/D8xR/hyRfPkU6PcIauKCv1XoZkUALho2qsESFdKTGA07bvax3IpPMl0FjrdyOw4fkgEv1AZadObRfr+etze/asuBVSJ4hRDmNrwIOUZ3lk6JUMI+5mUijXCHsuB5fJczaa3PrYth7390dAwhvqbaOdqiso/XttTpb0fwVOfhQhivbZibJRn+z6m2KOWwQcHy2y8TEAx13l45VCuYrMGq7tAQQwUgy0+O4Ng6ST6/yPaDMlLygtDYBy8YRy60h1zJ0lFHu4imip4I8KHikv6rXUO78M1LFHLmuH5hYyZ+autgBtc9sW43z29odIpsyF8J11I9juWUM2AhsjED3yuu07iygOleXvpENUVIxyRqIPOF+/xLvX3+Llt7/Du9ev8GB6iGfPv8EXT7/C6eoLzDSBMMmS3wQIZJgBzADPBbyvrSCj7iTTEeAAlivY1sK2UVsO9mfr2qRGRBkyv0r5KunphEu70uO2958IOEQ7MZsBtCGBWm+8lG9LQ9nmDNHPzbjqMErHAakjVnT5TpBlZXhvv95nB1Wm+8hkCKggjiQYXTnX+PIRUF8em5fKi97fW9Y1ujOwAXwaoLEfRV7mobf0IbvxUd+4a/lSN1SiPKM7fwAo8y0oFgJPu4QdNqy47HRb5I3dRUSAxAi5d1RWvJafO42VJZdx3iXIsACjBrWmVCfD+aG2S2T8SN/Z82mXPfegmsPfyoP9tOSNwiJ6AyATlekKMjeKmJHoA4hf48O7b/Hq5d/g1Q/fgs+ML//oj/Diq9/g8ZMXYDoBNCGlC
fOsYcEMkMxxmvNc5HhpjNveNZUz6etiS8M6tIa9Gn9uO7Zu6VDmpTH27VfrFhpMWBqOmzgSh4mdTtG5GgmYZ67L/JkzpimBZ5m7FiYVh/eAulNqP7wYNQRnmdDZPzem6H5Un3rdb7LoI3U1DXBtG3bPB1wUkEQlAqL6QIRtKRNAzrKRmF+OHpWDSwjotqIawMEJoqvMfSRAsVVor7DW+BpVpEd93pMyT1Zhjjq359muI/cenlW406QTAneGNLmkxybdOglKJpRW4XXlisLWI081im5EnnS9xi2K4oO97bn1uv+lkA1X+siGnWzY6sRGA3R8ewl6L+mXHciFCWkPDLz/bp8dAY1RH4kiG5FcbpaLACQCz9JHTwlIfMb5/Abv3nyHVz/+AdfnD3j04As8ffI1vnj+K+D0EBkJsoLGGnHxkmfOZdLzpFY7rDeZnK1AgCUyOKjn0fbXaqTGuqfxFlWFr6vWZ6detkx+o3yOAMHRu/3FGnwwz+hvLlEh5UXAh/X0rWxFZdVP4dnKLpWpyjHvtvyjso10ps87umfre6RDG2gY60UulcWcCoiwMtB0Qk+X2Wlflpvo5MOrUbYM+RZTa4Z9L+1Ne4v2dpY43WYovdKM6kCFPkq/nVXRQq92nsRIwBdo2qRZl4H4sjCgy2X3kKRl3h907Ogau2ueFGzsNYqj9v1YQPe2aY+8Nu8GiE3WOL2jHon3bAXANlke7W1gr43+RvejNNaAkgdUMTGA4q1CQHhiBviM84dXeP36W7x+8z0SJTx98jUef/E1pqvHmGkCU5KYRYlRE0mIvf0RiCncC4VZ+WvnphAxKDWjZ+tubQiJXb0PQckCxqOm4X+z8XhtHiNdbvkatcfevtc9xwqiG6CQlTMCzNSYNr3YlvsD6HYitmDWp2/1iwIayy2X52RlXSyrozKsAQr7N9pF1X+qU8GVL/9X6knLV95pc/IscIyiXUseIqr3BjbiJlGOQ6e+bjF5E2/qc6E9nnUVpoHyt/VQ682spXdPIzYicn0/ioaZdd22gW5paaEAHc64hNZWOazJIKtC6Z6nS9n4RZBt45ybh+c9Oh3r9nJ3mxQBh63nvM7YAhprdFincAZASJhwQkLCGfP5Pd6/fYlXP36Ld2/f4ItH3+D5l7/F02e/RaaTLMmWzJrrzSq7gA6fsFqEAY99HWVQ5rqU1G61PTJos+tj3nB17bAyDrkwfgA4Z5nPMpgs6HX4Wjt5wLL3GaNhKyDo09DJ7EZ2jM22w4WeX1brLD9KXjpEUi+XAwYVhPXW2UdO/HU/GdOWM/obyfyi7c2kTws0gLJniqmvvjx3o0RvW4ccOvXVRzW8IN2VorsJXeoNq3DYsdNeaNbL2AGNmh7gFcOwroqSi7yeRRmYi4BaBaJC209PI9ChyIblZRhRMZ7DWhowId9WrgN8/EKptqfZedYeKCaf7XkvMzchQm88R9tbd+8EoGLNc74ropwxzYwJsk/G9bv3ePvmFd69e4PMjMdfvMCjJ99gevgcH8pKABKnukzzb/McmAmMqRoEyksNkBWEaDCQNKJZYcwQaDUnoa2e0OfXHKC1KoyATM5zdUrWHJmtttny8kf31IOHLsuvBtT9EUOWS9vIXp+u1tnigEsDruyzmnWdxwG91oYtR3W3VeY1sAFgEZ0K28Y4jJF97cteQNNcnN7aZmWJ+aIpjuqCyx3SER0CG0q2ofX35xzZGIVrt96xnojv8ETxtMnII5DneUUxBJ0UqJOWfNr2e+28zmPJRiBheKIaYdjfRup7jGZI90ZonIb1Yu5pH1lFqVvRSz+zstADvVvtgwZ0j46Zj1/bjmysRUfWDNceSpkLeJA5Fvn6jHevX+PlD9/j+sM7PHz8CE+ff42Hj75Czo+Q8QFMZX4Tl50cy9yBXHYcls2gdGO9gD9dTYHWZ5gZSFzT9brTessWbFiD7484t+/5fU2i+mo6AZiNR4/gU+t+DXCE0QoHXiI+lpENBRH6btvgrEU7yl+GtKdzeLV8Hd/wvPV8M6txlzzbv72MjuooKtca0LAAg7lf9dKnkY3etmlUaar1hbJ6R5fH9g5w1G4EHAAQtg2iMl9CF61GqSjcCehPBWjsua4KVkGW7YBEcvaI0Fgp9nlIQy+zHdVXPCnLfm+dl9tYH9pkq26cz/FxjIo3Zzp0l6KPaLHmY8tnwIneJTrMyS+CqClIbUHmshdBQlFYZJqyN+h31Rf3AAXbRyKwEaW5l/aWh+q/BHAG8TXm81u8f/cKr358iesz49mTp3j05AXS1WPMmIDab9TjzcXBptK/UolzUAUOe/jtiscCSBgMpr6NerBRzB/rBm2MlBSk6KaAjD4qoGXW9JqhkjzFa86GtzHP+9okTMNHIOpQVCm//uj0mf5cLj8lXR6rSRvdl5m7NUHtGf3e618LNCp3xv9hl5bIgjp0JgpSl9FyfS5nPWhTo8ryXTZpW5+fZEEYs+zmwubPaMymG6jpBa2Pfe1m9f9ahH65oqUUFfCNvJMOzdmwqNwbnRHqu01ilsk809B9doaVuW7NS0D3PeKOUHbcK2CCUqpbikM/NaJBut6j8dYPmXgei6rKyiM3wOpGXkUEGEQJxDOqhDEM+lUUT2AizDmBmcrQSUJGLsuiGMUtg3pBXBTbovxBaI+Z5WC1qjFqcSqMSVnqearHEqfSqaXbMNsSSiek0oFT2WQIXJRsx0LUSne9/v/TEwPAREgMAWRUpjsm2VKKQTJ/kcp24tnIXMUgo/5nVFgQ0vUyEIV/196LgfYyrzX94MfE1/RMdD0z44wrpCmB+C3m99/i+t3f4MPbv8bbt28wPfgSj578CdKjL5GvEpjfg88zmIBZJ3bWFSclTcml1SuZkDdEvnVCoxoIIpKoRjV7EubWLbLLSkQAMtGv7nCZrW7lspeGTHadplT6c4tSNr+n6adc9MSsAMYYWuu5ez1OiUpfnMFlnonIlOgQKgXkrACp1Uk6ndBi+M3ZUBCXC4irek7LWqItRACyyDmRyP8sS0eQUtlluUabdZs1LsfGNLM85zJxErKkNrPMYavDXEVeFITNDNAk+ks2gxNQSEQGuBTtxQDzVNOtBw1KSxmAIkBX9TYVoKc7ulYQU34DAM1o0bGKwLhOXm3tVexvHve/Klld39FJskVi9ayU7KMeIlNM1NVXERDz3DE6vBplLbw5emZrfHctv+jZ6A19PlkF6EKOo/GyMA8FFeiBSnvG4eYNb69BUNRPBYlEvWmo3YYqhG0dNusQiaBrGdNUoNFCazlbJG/aJvch4K2xSf29ACKG48ztELbCVAM2A7nU1BIBsz5HY5xtKmaZ1gHveJj0HYDjmxJX8KBhfEBrqKggzBD4VWvAyFc80NceHoGGLro34G1PxHBPfxtRJINb4KMDHOXgtYmvQfkVrj98h7evvwdxwqPHX+HRk2/Kqa4MorkBnDFD9keNFNgrvUZo/Z3kRAxo9EQMh028aZp+EnV5B2WhRpb+bgGj3Yzdcj/PM5gZ8xw7FtEQABG1SA4DzHPX9+2zCjRqhJtkmXEngLBGUIFU
u60gKDP3daJGMalk6xADMJX6qLqGAd9qOZu0TQTe1oO9jgIwMmMBXDQbnYSds4BEudZ2aNaytkhDsRDczLgC0wpQPeDjBJR5Q8IqNfBS67CBBs1sy4a2T03T9mpqzrSRXwHEtu3MGxfq20Ngw//p9ejzNqkPq60/F4Vwj4RvR0MXPj35nqGb2ftxOZuGRcjZCGMrU8xLKhZGASZzm6lsOy3IgpDlTPNIsYza6Qj4s+WL8okiYF36JIhaAkbbHu8vjpY6AEDFc62uVOdihyKg5gl7mdXvOoQoxqdE5IKo122APE3r6HMjD04VOYjB8zXm+Rp0vsbbt2/w6tVrUDrhiy+e4YsvniJNsnHXtHpo3T6e1oat1FzoM/1E3zjCIGSVPyOeMxXPo1KwkU0UM3KIfL/1Ey7tu75P27TBjIwZEYVlzKluvKV5ezls3zU6YnXf2N5w7jfzss9FdsyubJnnVgbPt4IlARuxfut5KkgD1NWZL18H+Gq8fMmnf17TWZQ/6CcjoE6kUY0lReVae36LdoMNbYSoY9wl0FDSStXTRqP7dvLU2pkLNr0tsp2gAzHF4/QHsennsn70AKBxB/P82YlP+rw/y6A8LB2fl51A3x0BkK062dOmNo/IC7Lp23oUcG7Dd/egY41GMsx+iAsrYIBg9i+Io37dZFADNm4LXHg+9XOkEKP7kbLtZI0z5nyN+foD5nfv8OPLV3j37hpXT57j0cMnuLr6AuCE83kGJ5KwwU7SLKuBCPt7m8yZy1Zh3pD7vr1GkbEqd+ANkz7vjWHkiHWGrlyzm4nZ/DW9eZ6Xhpao7DrpZaRfSFDLrpsNrrRjrZ+iN8FtkrRSuBSfWyTC1+GItFy+bmweUgeM+dzeGbWfRr8t0Ijbr9Vt5mbI7fMje7tGW31Iedgjd3vz3KLdYCNC1b5AviC3RT6iEKVsV454BRpV/AjlR3xbI2pXXZCbRLOIZnRpA1UxoK/DkZFPTiCZy8ZCNgRYkpXxw6WQdu8OkPht0RqKVlqWtRi0oC3uaUy9TKOTw/V+2Cv3Rd/yf0AY2bhNikBw9Dm6t/ASAYBmEF8jn9/jzZvXeP36LYhOePjoKdLVI4AmZJrAuexOOQAbo3J7YxEBh9YO7ewNCzii97bkfqnTFGzo+7Y+Uf8aQGpp9XW6zKs734VbJCNyeIhIj6jr0vDP1efbuOCiXMuosAxSyaQHFNyxBNg2z4jWZFjBhl8psvxTBynOs89DHSr5y2XiL5jbviHqIJa0fZqRTBxxlLe+b9WJf/4mdHg1ypYhuU0jYQXPKsAUVJDdB8Tz6n9fUom206RUJmmZoa81gANYmGF+80Zkg5tXyTUN81ffo27J1AKMuDKvlfpSARu9F3dC5VodIarabgS+folk5WOrTna3F/XDKBZEj9ro0iGUEf8jmbCe5Jo8bckawCAuYCOf8f7dO7x/f43p6iEePHyKND0E46qdsAqG7KPRG8VRGdVxWFPm3TBA+WcESqKy7Cc7T8DP07KG0Ya/+1C45O3ni+j1xqsFGpqPfXbm5TDK0Al1uMSDDPts9dMAGblGrCv69hrrHb3enMh+VZD2izAazO15n/eynEWvMSDnv5SvMjXDlB11LiklO9/tMsdwDZhvvbd1/ya6+dYOYrNK5ba90goyUqrDKFZRRgK3hQaPVJxNq84+lqnFSKlfB++9LasA2YU714YttLxrdVnTylkmxOWxcu5ASBAi9eW8hNaARfi8vNS9H52B8Eskr7iGbaNeq7vt667+JnkpimKM+FhX6tvlGA11RAdUebBsn/f9zCvj+g4YRNdAfo93717j9atXmOeML754joePn+Px4xeYTg8hJ7dS3VVzOTdm3Eej9ojBj66Ia2WzwxXAMqoUlUuv+/NSZJLi0sFoAASQoV4Z8u3bEdAVY+XpOh+CuUVgbERjRAZWOSL080r6O9FwwhJ0KLBZyqIOfdh9oCLbEPWh9o4Atpwz5nmu812iPZYYS/Bk22zU73zUWYYy+ygIQN1cmxHfPm1bf6NP348i2zmiI8+u0Y3BRmTc78JIhF7XzkrwQOg2+NPIAvMSYCye1UZXhaFIF1H3bN5HMOwIXesvt2TXw70QoZZfflTejgi0L9OePI/QXYDVu6K7AkNrsac9Yf3RPYpQieY5AOexjLZ3jgKPKC8PNkZKczTvaBktYGQ+43z9Btfv3mC+vgalE64ePMLVg8fA9ACZE+Sg0bYJ1G3R0gjoOStLo3SJw6Pv+ehCwAlG7e3TDEFb0F69QzECF10ug+di/a310zlapU3l59JANzClUZ5+f5OturbzxixQifVQtFCgz8c9PsgT8G3Dq70tSuP48LPnd4/e8Olfqp/v9Ij5uyCisgcGjRsdQDiU4GlvpUXPqXhasOGf7ZAlFLmbxlKv1JRtmxcFHEY4iYG8rlg0bVk3T2XvjB6IrXkut0ELI4MWZfR8fmy6Kaj6qZGX09E8gi2gEXl5o+e3PK81sDH6HkdNGIQz5g9v8e7NK1xff0CaJlxdPcaDh0+Q0hXmrJukJdCGUT5CsQHV/Wb6PS7W3o/IR4WsURQ9BHTGnf37ctHOelP9xVwfqPMhmFydt+pdAOISw1lkerT/WLmq72Yu0Vh9yhpMXR6rwEOv6+c4sqHll0fMML0BOl7WiAiEqUs7KkPj1FRavRfLm7Un/u82nb4tkLFFvxiwASzDjr6DW8GI6JLKWrxDIhy5rn3f9g5mXl8K6kOpt01keiIFQzR3GVUYpWt1mI9AfQ70OfFym+S9tzh8va6QtiJ6/jn7fQ1oDAH7Sjr9d5bVKOf3+PDhLTjPuDo9wNXVY5yuHoFIIhvMGO9FcgFFoXthR4zLyFNc96T7sllgKL91k69cNoRSvVcMGjdDrMMFLapZHsl9lMSzoRGFBmgQYrNg3iRGhvUYKYhYRoQ6ENDp0n2Rb6K2lNYCDWvke2NP0N1nRul3Ubfut2mDHXJ3kyjCiG4KOm7Cz08ObERA46ZIbYvCCi7onmjNkBovDGaPDDZKhtuwhvhYZTOVQR9NJDsJZmRwGUbJO1RmVz/SE3eU/DIaiWN0XfvgXsN1lzQyBj8Fsv6SdwD9vaiOvfKu36O8HFgOvdERnyvRirUVb51n3Z0z1IZ72vOMPJ/x4f1bnK8/gBLh4aPHePj4MVI6le2gUzNIi51rL6cF4KjfI6cInf7YA+5821lP2D839KDds6PIkX9+FQzZAi1of+V6nd5OybXP2DQzdNdiLa9Vb94ZjcowqnY/Z0M+Uwg2+iiTkWXXtlvOFGPZNy5xmqN31kYC9tJHBRuR93mXXrhXYl7RRWDDot3RuyOe47BsT4pSZQMWrjvPVfkvQES+SwfQ3eR04y0UJQGSQFuZQtal0ZuOUgaaUHfP12PrN6q/C4trT+w6rNaZ92qqdl9Nc42aoesVn76aj8j8bThJn4TGimXX2xtALBdJtBhV1aKInchZ3S4oSGOk1OyTFrBU2eUoeL5dljCyIRdq2uA2/AgWMC6lYehSVTlPtRiaOskpI5/fIL/7EfP5GpQe4PTwOR48/hKYruR
cEsxyZkVKZX+Dtb4+KkxfORJi98pc652qU6Fdr6VdgIHZZ0G2NdeWLOVmkxkwCiVsksqRX12yKB5zWCscfCsJH+LDajdKCsiACszKn9TnEmBKVEInO1pQEYONEajrJ8m2d1Kd2NtArM7vIOg27sp/KVFpskSArkDRvLeARmXe11MIkNaiNbT4HfXvvYAj9LXrtf0K+dDZKP77JWGeI4jKgoW64QqAidrs4wilWUS8thnZWp5bfDOzKDoG6kEVqsyjdJHFI6uH9RTB107C8kuOQlFhl04GaD1k2TMfZWooy+Q2PTtDO9wImFXey2dK/bMyS9mGs8flD+sEIoTtQG2TH8k3LiCqqlladqYh8mYAZhO1LToyB6UaNZ8tIVaih0FPkPZBzyTaEIiIkDHX2FY1aEZRA1QMLFXTHHLoFJIFwxZolNL0/O/UA95js5E/+6flUINHTEi4AuiMTDMyXWNmgDEhpRMSTpjnayTMsux1fgU+/4g8nwF6Drr6EnT1HEgPAMyYUgaI5BC2iUCZwmaOSOqQunkLifoTcWMdKaBJ667uYqJ9v+iB6md0IIi0C4nBq3LZT5Rd8Orac5qmrg38ygv7ztIR8HuCLOvlCN6QYgvgo7JCQtMvi/2ARKBE5XwYtm8CAE4ns++RASoWbOin3R20p+XmZ6ILATnnScpa98ko7xBp+h6o1NrA+QzwzODuqTEt1I8DSvZa9Hv03T8fDQH1ts++612Odu2I+b+VYZQo2nHbtDVc4vNdmyC6Gg4MPC/7uUd4Iz7LMUTQQ3Sid2wd9ih+JKpLI32k/r0AH/F4Nymyz8WK6OcvioLyXtJfImXStU8AKo2TvanxehnsPaKtyOBWWaK+5OtgrDhhPHluAJYJGSggXL3PM+b5jHm+BueMq0cP8fDhY4DKCc4EyEEjE5p1v5y88rbl6vtO/50r4JDf9XvtIGsd5ZjT5j9HQyU9ra2Lukx+I/JOoJW9XBw5SgBhue9StO2A3t/KM3rWO196f2nfRm3D3fdUJiD7pcN7bJCltX7ir22V3T93pA1vYutvbenrXZGtmNHGQ5ZsBCNaKrcr9B8If6SER+mFPJLOSG9gw3f+PirRUOMayLJ5iiO6jMAsylcUbBRi3FWWezpMt1GPkdEQ6o33JflZmRzJuuVjFDU6AuSHG4m5ciy27QfQDSEwylbZDKQZc77G9fU1PlyfwSA8fPgIDx89LpGS+sqt4N01JygGHP39uybPn9Zn3S/oQh5um/cI/Ch/OWckJOh4cd1Kn/oJnVsGMOI5iqr6zz1pj+4lInDaZzf28hzxNnp2La0aRXJn9ezh6RLZuRHYsIZqDwM3QcFbkQ1N3wINrxC9stviMeoAeyo4fI5RIhu9YRghVg3djTy9BVGZQxIYiiWDwou97wX4aKe4pzXSuu2vjuRpj8LsP8dh1aPk5SYCo6ubOx0AG5HcR0pPduyFKSa17zI6KacZI4PAyPM1Ppzf4zxn0PQAVw8eYrq6AojM0IBGESp3Q75H5B0hrbtxZGOdbquvRX060p1b+cnEy3H6l3q4W+lZEkNYzpdJrUweqNr6HlW5l+WRo+gd274ex0MHw+iQuxwBeHvd8xulPQIae+XN1kHkaIzKGTkje+knMYxyBGjY79FyvksiG8qD5QVACGiAfqfDlgBqNMGXKQI4vqybEQgUfXwAiXth9Xlo2eb5bvff+CVQJLqX9pelYlmOUV9CkRKJInz+esRblPbWtUgWO77MvJc6aEIEYokYJs4gzOD8Aefra5xBmK4eYrp6AKSp7C3TR4MAASuXVlukm0aeZ/TuxwL0UZ0qjSJMMty0vK7zPC71cLs8AnDrgS0RyoaIS4ManVvVpGNf/jbKQ0SLoy96nRynO2rDOed63P2aI6vX1mTFfh85imsU2ci1fEZ0qb0/dMT8Wsb29yXp7CHbsaMObiMal2xQNUL+a3l6YMHsdr/r0u47Z/Su52MklP31pXJb3dSMYMaKl4Ks6VtvbcSLS7Y6nfrk3vbelce4rw/T3J//Jb7tXj5iZb+mXNa8LvtdyjjXa/ZznOa6fPk81yISe/ge3dsDWGqehDqzVY0gMwFMyJmRWBaEg2dwvsZ5PoORMF09QLp6CEoneUcnW8vYi0nwsmHEqE30e+R5eoo2kAL6rdz1c8vpss9o2nuHqiLnxoMNrxdGxnMEIiMe1qiVF/DRu6PG2vIf/akuDx3FQZ5rv6vDNrfVLHuA2Zot9bIU2as12YiAhn/H25Uo8hE/u003jmwcyTAS6jVkP+rEI1Cg3/2sfd+Ae6MbntY6rpZjDHIEbHiFYc8fsPluKRZbtmooOTYOS06oLDWLBdej5ki5hJ5Q/WcdFURea5T3Iv2DaGCt/hZpo4GlXc/nvIpOloph6ZHsMfT2U78vZaMchRk8O0p3XMe9x7qYL3GBs3AUgMT8cVmeoD9TWVRKRTB0POWMeX6PnGcgnTBdXWG6ugJNE0BTARk1LlL+bysaRvxFdWnnDugze+XNPm/3c1gzCkfyUMNpdZbXTZHz1JWVxHM4Atqj9jviQXuAJvXT+o8+YwFCJCuavOUnOgkbaO3oT3wdTT71/PrPvt1St2v0kf5jn41WC43eiexeBAA9OF0CkL0TiffRZ7GpVyTMqwgNvbDYe6P09tBIIKwgeuXi31fh9x07lVAgzIYwVrhtp5F7uvQ1Vi49uBEPD67co3og6bkhUrYdsJXhrnz+nydFQGM3ihmkod+9gWptti99OsgHECtUz9tNaaQYm+Er9xTHUpN5gtYNAM5gnuVQ5nSF6fQINF1B9+JgTcMQmeWla1T7qnlyzbvc47nvJet5W9C6puesvIxBXM9LpwsGPI6cuIhuYpyitHyZ/WF26nzZvD0QiPS4B45Hefc6v0Y2mBd8r7XDWmTlLmjkIEdgY8sB3qLPAmwA60INGEEDhmAjencLkY7uRajXd/SRhz7P8wI8gBiJEpjbqY2R4mj3qEPzkVJrQkKYmWpkQ5/3PNZ3V9Ldqpt7WqdIad9WWh7wjpTjXbTfVn+7rTzsZyfDxttmJDDPDcSRgKgkM0XB+VreO004PXiA6XQFUILuuOtyxR6gtohgBH3LR4X02b3p+nrwz42+HzWKI7A4kt2Rjtgr27ctN94Dj+pmBDT0u3f0LIAf5VEB7Ub6jtnFvc9Jv47Ahu0TvV26HAx9crCxJrC+IynYyAOU7hv1Jh3RK4wIbPh3Rp1ThZRSW57qAYcl3VAGWCqteLxUAspwnW5Yfh6H1aK/ezpGsbLeX4+R8owAalO0Lf2tuUpbu816+phRDZv2og4BMHGJShAICXJEfOmf0H40g/MZYGCaTri6eohTARs3pVrnkCWNlj9blj1A46iXaNPdY7jUiNh3onlcq4BDQkb195pujZycI4DkCPnyR0aQCKEui3Svb4uonO2Ztkpwjw3g3KnlnxC1uSaLOxfahE8ONpRGQhl15iORjRENO2mpZPv20psE2s5y2+OR7R15T6/1Y6pW0cpOnsBS6do5HjVUV852qKmYqHBUC1p/UZ3EYOMn2Vs+CUWe1i
XKRoc7yPxK1J94rAbB7zUA3F1049bTNGlHkZlanvJdQAfKXI1mAARylX11skyYTdMVptMV0jSV4Rc2OdYc9gY3Kj8K7yx/yvfIeG+ladPoedQ6gcxDgez8W3kfUDS8MIpqjL5X6RsAh+j3pXWgtCVjFshE8m5/R06TRjX02S5i0fkE/caKjS+qQMaTBy1EBE46nIKqnIX/ls9PSb/eBEDeCGysGduIoRGqPoLuASBLa5ftultTVYHy391vAIj2/hRZkyV2CupVCFOSeRfyuxlhL9y+nFUAUxJlWNMlMGeZSZ8EYKSp3yZXhbLrAyroxGVXPcIk1SECnLMcDU25Kl9Ngatgt1rznXdvZGNdKdxh5zniml9ydsTO5GmE5IZJ8iLtlFLdhr+7TiTLMUtO+m+CrLhIlEQWdXUGUOpFX+KVKboM2TE/MOhBpNA/Ez13CVWpLJGKkol44ECZ+imCLZiK+zphiWwkAlI6A/kDTtOMDx/eg88zEiZgeohpegAiObgQBFAq/SAzwAngSaIkdIackUGdkdG+Ko5BGd4EynLZpWHT3+pE7Il8LutG9gxhzNBDl1Jt6xlz8ZYJCd0O2oai/Ef9eWRAaqygnDlDRYRzf6JkmVhdthQnaqJuHLNhWW3def5UxrM0Vb1sjLqfWJ9zrkPQmq6uTrTRjJQSpmkqQD0X3VjAqv4nhqNqTj0AcGTzov6SplIPWewViu4OhyO4Rb5t3djydd+J4j5un20MSX+TDJrTTmLcOHqntIFek+pYnxi/RncS2RgJ71oINopghGmXe5yzTNQKwl9R57Ie1JBvfY4ImRlT+U5JDIt0nFrtdRa78u/Rtv2swIF8jihAA5gmXXqVS6cwHmvHvxXq5vUBLECDM/TYe6QiICjKnTP6KW4rdV2v98Bq9M7NTdA6XQao98nV4aQPeqzRvSH4RvOe24ROgcGp3uOO5+p3FePN7n7NwxhTGx7eKoMFpbcVGi/SLzJbeEaB5FIHXB+UjcUZdfUIJ4gcZwHWNMu5KFlO5rmarkBXj/Dg6iFO06kqcabc3EwmEJe+wNQAW/XoWx0o6BhFNqzO8Ua+lnelDmu/plw/qQPWy8gEUQZn4X8UTbgxMGyKs/1mAzI0v3JPo232XhrIy6J+yrO1jHqRNPuxbo3SjRym6H0u5atAw+rYgrBqW7iijKJFvR0QvZxK3+x57muRjElWoDQEiZL44rqlLvXipIdORSQv1hGweV5In80wyhGyKDbyFiLQEX2ukUW/FMyrAFC9R6+QPfCo4V/AAQ4y+filavH8kC3+NT07dh+HHds7ayDtno5RpMzKl/qMN0yL57kAjXoNXZ+nltBQ2YwAQeoS2PeO3rtNoLHMANbOtz5eLuXiEYIFnFSbVwwR1bCj8Hq6OiGdrnA6nTBNJ8x5gsI4NWXqOEhko5Vz7dMyuxmhCMCkBxy9x9qe037cZKVPs+qWwbF6Y0BznI7ozUtoW6aMYOx81wOMUdROIioI0m/RkZYFAwmgAlYXbWF40e9+OGv9yA1ColNNU98dHg0wSMXmH13v7cDH0/OfLdhYEyIfZhpVYBR62ops2PzrWvUORdvnTWgKW0ItnpnOxWjpUY1itAlLWv5lHcT8U5cusJxI6sONNr1LwNg9jWlpqOSfMBQaPt/iGV0ao/y4LK2uLqUa1GU7EqhEu9b3o/G8RcbxrqmXxwI2SkxH16XpPds3U0pI3PqV9gtmiLEgqsBFh8M0IqRljgxHH9mIn/G82/sR0Ojf61fBWc889GyZF1GENR6O0lH9eUna65Ee/YFNgLx0Npf17+1GNeLEYV2HkVyO9fyeCGE/dBPrcTa44iZO36gvf0qH8rMFGyMKBcD8XusURzpM6oTDAoAmiDToAQthhlFwaIajpYku3ct47r0ti6xHSNZ3sHuQcXNa84jVU9r0iDhKa0XjAmixMzIGKIjGlfSj9j4S2bgrWRmBddQhhoRM7VAuKmEgC86JzPCIoIsCKnRSbakg7YcthyHQsN/tUJS9F/Utn0b0bHun3ddnVIdYA3XJ7siX0sdwQHxf8bpz7b3tqEgUmSrOH9vIBkq/KJ8AOOcK2ivWyOY9x0fUN+zcDLvRWqhvuUTwdpavWSNbrrEet3K3usP0HdGNtyu/5PlRNCKq2L1h21E0I1ICu8gIEOksJSAUqBE/Pt80AWoQlqDJ1oMR7gOdfa3+eo9qX52aVByvS2rXG6j6nMHLEd6ODh3EkY1mTEZpr0Y2XBvU95jr+SAVYGywm2g5MXVNQXm6pD+NDO/e93Qs3c6ZQhlO0XSpGAGCzLOSSdYJiSakSeQ+Y5YJmCTvU7UwcVv4363el9GKtfpY03UdoDKkhmqe23b0CzAU8BrVn6c9keMojT16z6e/9U707M2iaNLhOp1k7nEBosy+T0onVTAxiuxY/emjXl4eFlEx91yffl/vGunW9l/QoF22bO/IuTzatkfb55OADXvfeneXKPWRQOjnkc5GaHt4dOJJoqDVI6xRCL3p8hpti8vgsmwth+WNOvoo9LWMnvSTVb1A2GskzCyUq+d7ra5GlJll/t1RPGPoZopmmz5G2iHgQK+U9PnIkG3l0Ssn871gvbUIC/OSD0teNqMyeQV4tE6179hjSfakwczIyFXhEzJSKqtIjOd4lR4gPXiEKU0FgEwAacRyKuFqKodkzXX7flvOde77/r7mKfp+vemxVhwp6U3TVNucyB2r7iZ7R/zb/GW1Rr9bpud7b+QkkpOoXJqnfc6/b/mz6Xv9biM9Y/nV4bM2zNYMLKq8NF7KpNxB2Xqdu77tu3fu7P1NI03A6XSq5bRtHr03sn++Du370d+6LC7tiKUj/f6zG0a5a0Pj87LkG7abCbziCexDlZeVKUKhHggI34BXOt6QjbwyJT+x1OYR8eU71CXttuU53/T5vWnc5vPL91s6o067iGp4EELcpVMBL5WwiUujphWCjXgVyl5gfpP+OVJuEciNnzHzNcy8DTUqRHJqZy4RDaIJsh9HErCRZAFxoqnMcUmgRMjU3t9TVm3LI4p7D7XhsJbPiI/mPMQ8e70xcuou6btHZGLtTCn/6Xkh0pVVfZnWHElpx8EdlsiE6vfWlywwACwYYbaf+/uJ1bG+3tfaddQXFuBn8N4ovb4OtoMBUbtYHo7KzWcHNj4HYtbZ7ixbj5N06CSxj/acf+eOQdJ6HnHY0r6jwpHIL5xsZE96XUOzXlEoIFPT93Hg4k+TthQvQQBKFxUht/yyKsVxFEONUPuUf1PaXllyG4bTpmXLWcE786IPbafBKKtW9YZxCER+T6cTMk5A0qPCk4CKch+UQCqvWfbZmNEmUFtlH3vvYnTs83Zl3F3QGKDtc4KU1rbmvgsaAZytPC2YQ50zsQQcMcUevLbRMnrShqkk8hxFAtbz9f0lAvkj6sFhXjq64xc7Pm1aPu1xFGidP9t+zG1/kD3Ogad7sDEgBtdxbQG/hMxqBMpDlDohBG6340YdxoMH/eS8DPlZ0mdTSshMoNwjb59mr3CT+R4vxSVjPHSGvy/LEbpLxf1JqNiFEKShv
06Zl6fylp0IdffIlEybok1W7mSirpcy3i8RMvoJhx2bDqjaNG9CVnZUVkC9Uvf5WypPAurjKlAp7yZKmE4nXF1dIT+4wpmvwFMBGxaUlT9ZKZuQpgnEEwhtmaE1TENv7hOhac/LHj4UXPRAdexZf0oiJxPi+LHMDUZrE2uQl2kAnKPVPCI7KYku51zmAFGbj+cN6pr+tWRlxkc0RrQsBwFMq/mM0oiet7+PTir2suL/PiuwsZcF3xSL0NDG85fwQaN7xmXhzOAkjTSfGUgS2Wj6ikBJZsf3yPR2Oq+iafs7+owAg3/HK81oNr0+p+/174yX6/nrEtVYpv+5KLXPAcSMvJ76m5YTRAHUXSRFGZqwbwA2AIQARNNWA+RPquw9ytutr05ODThdKK5AVFoMpHi6VK4yyy68U8KDBw+Q5geYr6/A+Qo5nQBqm15RAVoaJScSmJY4gUxET50ML+vKIzMDuTcmd31aZwROW58ae6zKY3RqtVLfN8dp9rSvP28BWk+xB94iHPr+mrGTHUHnhSG2Mt8Zz7rjqLw/z9mlB2g3Gen4RZqu/qPyK9BoUYzWZ/cY8z11u+V8jsjLjH9v1D/WaD/YOGArGNiNCponTG3finJvqyB7GiQK6dfgY/XE23U2z2cw5BRJwpyBiRIolVAkgIlRZ7NHHqHnRP3Q7pqWkW34rnEUd6xIIWyHyyogYDSviNC8TH2v3GuOUxDCRXuPmWXb2wVP5lfUEAMatftdAZYt7+NSasYVIGIQi5ctLa3ArzJRQaBcM1tnUwNylBpY0EjJCGyMyui/j8o4ArFr5fXpR8rPRjYKZpDr5Xt9thvsI9S+Q+0J/UsT4XSakE8TptMJ5/MEYCo6RbbuJ0qgrKtU2DgOAjwyuFwfT56VqAZXfnUXYxCZpZI9rWmxXmET9mzH3zkYyGhjSioT+qsNvaViVCs/JA4VSplZh6dqnba9WvQtBst7rHW/LGWkSsQQp/p7ZKRG3jmXuJOIfILMuRCZb48354zAIJ6lJNomzHJGjjJYrjGAifU0YOrKpn23AZaSukYhTB1ImUrfTuoYLMFdBDR6sDGOQET27sh3LXunqU1D2RbpN5iU5+p26/XdVld76EBkYx9yb4b6AFGZEaEFphLuomaAbQ7E2QjAGNVrOpYzFaazeY9K79MpZ3VJHTP4LEdZS+UDiRMSgAmEfD4jcev8EU9JvUlqs+Xb2SoJRFNVCISyqUvVujJLmjkav4sh1JbXoDyRlpdRvUt9NVWLJ8/bdQddfU7iDWbtKARoUL95qRU6xlo37xfW4172Un5G9TOKNlwSrbJpSOSAcZUIoNyhWZUJrepU5QBlnwhNa+wpUaDQ1kjby3p5Xm69oorqpN/dcr1+Rt6WBRxU+3qLdqjdbcA2gSmB6sBRWd2Vc+mHDEbG1cMH+EAnzLMcRZ9oRmICz6nkdQUqB60wMWacwTxVlprc+HCxqxcUSKhRk6gNFICt1EttP9VDdonORn12YJTa5EYbadFr+glwObxRV2EUI6KHn5Bpn5aJcaoiOSG0A0ws/wLumPvfkRFmZpzP54VcZZTzsNBO92UQiHVIDd2nVInlXT65tBEVu1DzN8PlFmAAS32eMy9koZVLAEfiIqNTWynkIwL2ewMcZdVjbbcYpNT6cr/1eb+BY82Hudoj5d5o52qn1JlO3Laar/Bqzs1OH6S7GUYh97lBFSuJBMiv6i0HYMK/H3hUy5c0fKsp9DDEvqrAtX6WZxTZEUvUg4xCbNnEa6z1swGh/rcKuEYcqjCYiEcrfYSsx0Vf1gWial2tv1qGmiEwc4YeYdTHiH46dJvDBGMy2hB9u2vu9vwI8s/siFYcIR/ZuIs62BsRWboS5pp68OW7OATSP6oXabpHBW+UWsxDjQYySHcM5TIMI5ZBQIpV4uZvNNZthzU2I7A5VyMXtavmlcwZTGsg2RuhNlTSjEU0rNODyr5tiCr2q7qkLxubv8bHWkTXOmFbpHUdgV4N3NQIB7e29z6NgNSlXlZ+uqjQBlgel23U3lZXazvHQ9YerKuzx0ZHWD4jXnKJ2kRAxD5bnUy3KkjTjZwJdUbt3KWjTpenz2KCqC3wHi8p+lO0uni+msF4YtSigQZypGvFmWXiKBMhcZlsh14B9F5FYDA6ENXmgfQNWuMsxtuqCaBXy7GiXKOtoYqFchsZD95us8+Rtgz5bZNgjX6cfwRGiWghU2sg9igdGRq5JO2Iv5tEhzZlufxzOk04nU44z9eADgNQDzjA6hkzUvHkmRkZTS/Y8Ha0oZJtJyJaXYmi7W51nFX+mqecAMygpMs2+zLbPm3r2J5mWs9yCozHqC49aPKf/jn9PtYxTa9JGn2+zFTnHDH35ejTM7535w2ywe68+CyZSOTGkS9Xp7Nd/UR1tleGa3uQGvflJGMPPC3YiIBGCMKY646jHrh4GSCiGtXwz6kctsiY9p7bARlKnwXYAProxGrj2go2f0CMEzo8OvDi2KS3xps9S4JB0KOd2DRW1ElbNGP5vXVGmO9FMToaGgkJuIRCGZU3CmxEiqaG44oL0Qk0sBBEbww/R4oU6MfglzPDREhDL3cknxGfN+HdTwq9LWUyMk6XptHphI13RGkK2MjXZcIf6Qh46SDlk9CWrxbVCz0Z2YONiNbqa63c3qO2IKV6kDmHYMO+b8ts/xRo2Im/I0NleR0BjdggtjbpwvVcXLrK36h+tGzUlbM9b9rGfEpcQx9vAISUf1O2UfN4vVjLAhmmWVsOfARw1HpLTcdHEYVRWlF7jMDGFh+Wn5RsoxDKAsYObNS71rAGaV5CnxxsrFXYyFjqPR/Z2KItsLGH1zrrGIS5oEVuD9jMTPgXfe+jiJeGJBdKjiyQotqxW35Hy7SsB1+HHXo2/GSt80FenzPQsPQxgQYgHgtG4G9FxqPnbsK7B4pbu18epS3A5HnZSss8HAIOeUb64zSVyEbxnKm9iuqWVO/XprH0DLd4i+6PgP3I8lrd1QzUaE5Ay0MBRQQ8knN8LK+jMo0iGhEA9jq3pklc5xtE79i0I+OmZHdLrfkRMAe8j8pHgX6zvPtysktvK58RMbOLEiyBhgWXPrqg80F8e3gevFO31nZWTlLq23MVNBcR9KfW3kRP3AnY4OIyRHx5IfCVE6bVX2heiA40EFbnDIzyXHjiA4U24kkWWAFgmdhDqUwI0uQKeq9LQVlRONXwbkPw4/CdncNhr/d1c8DwRKENk/YoYgGYyUfQ+vcJxV7Ux6IO+Oyok07p3CW/TjzXIhnMZTLXweWUW8AdKNvJl2tbIGMNOBypq0uej0hWIBjRdfXZvGSCPTelgQzRHQQAc4s0ETFQJ4ju3FCp42sDNK08uzDk2UzadPc9qLDtEz1nnaMRn2vA1W8ApnXih5YU8E0TAbyUGWa1CVS2i7cTYvWhxi93k1GV9xzqpC4v9evKfz0PY7ACUz/egPt6XZOLhTNghrk9cPAOGhG1k5sdf0fAos3frigRB1lXYGm5+vJ3Bpup7EOynNB6Ke0GG2sTjiIycnJR2kC8EYl4ZtYtkX9E3I8p
By9YAALjOVC6RX9lIx4NoSoaRB1uZJj1PGTz1nMeYgO/BkJ6gV2O1doy9uXvQcXCk3DXRsqKAtTC1YvcSbSEBFsGMKItBbBGe4z0jcl0cvsXeTAiy2Njf4QW7Yy2Uswrkb0RkyNGy6ft8x2BLZ9eZkZiwsxNiVfJYQYlwjxnMdZqaCEr1+o5Gbks9WJILehsQzCmFWMQ0S7HqPDWfQ7SkDIvv/vniageUx55zPq+97BD42zetftP2GtbTqD1mqdpWjItTwlAKe2UppFck+6vtdBxebbAwxuX5Xd7WJrXZb5OfF355yzIGMmGjVAs62wMIOyOzUgEBJsz+rqufQJLWfVAo8lJmThL8qadw1j7keE5swCfLVtwRD/e2TCKGtg9tLa72VIoB+BmTT/yEqHJZT8P4bhhYZjxVuMVeRLjUTwFauG1Hi32kQrhT3nVonC91z6BlLYVZER7jNlKIOSzochwf87kwWNnlIsXODIQi+dX0o/ABgfPfEzaA6L8/QQI0CDq+kKbRF0+yz8MRp7nsi9JicZlApExiNQiHnNu3nqkvG9CCiCj68HVqvMjTzUCc1G6kVFckxUbwdBnfVTD3/O/R+n7Z6NJrPZ+6FgNjFzsBAvYsLp4ObwRRzDsdwscbJ2OnGCtr2U7cYc1h2C99PuI9jhTEdCovDCX1VjsHDyKgTCPXfcjoNzSnYANIhciW6EtL9bfF9UQGXOKr5fQQhe98GlW7YXdfKPwwSwdITODcu5Oj1TZYS4rVpJFy1ZxWn+zvau8+c63FvUA9nvEWwar1v8tGKMhP7eUdmyAb24obotqQHVgBKIIxwg8jcvb0wJobICN2zCsW7Q3euINglxDh3q5RAMn54VSkn4lS9VlQy8wgzmVfRrQJwSUqMeY370UGhJumikqV5DrAnDYIQ3vdWtaNj2/18IaRQAmmlcRgTDLRz8BsU+PzHyO0XNSTo3raZmk0a/SZK4B6+5POZKBlqcT2zpsTy/lMopSjIDGsh6SsYF9Px5FLJgVBOyXN8IyCqhgbgF6CJgCj5zZagRzveYQU2SHtugOJ4i2DqN01IMaIf9jEQhaRDaiPJiPpdolX/6bOZcdCqtpaUMNnSHRS3IvanAfudBP3fylvdeXY8sANQZo+DxR8wrq9Y/s/R6ln0pkwxuFLeDgyxSV8zbAxscGGkfARrsmUluHKes/kVpkM8TJ9Zsd9lTXBaxDV0sv8QgN5W7QTpGBA7XVKL6+lqHxbR68Udsia7DCCEMAfJtRHqXKFWCozttbv9VZqwa/3hl61qrdbLntcIXfhySZ5zzYaDyXZ1dOsPUAUPmVMgwijYu2OeLtLifeehnp9Dd1FTisP6HRkFhPR85c2Q02hiGV0Qt3pLvWQjuhAHMz9JsdVCVjD5VOkJmRUIZSUpJJOEyCUk1kLNtnWXZny2WZWww2UMbMJJGyuAuZNdqAOpEKAIhqF9vRkWOlZ0k7521FNkb5XAjxhun3AOnWkr450VIJMso8Hiso5l4EnvZENJSi5Y/d9EMF2Jr+JQYWrZr3vB0p5LXnfG4WKDDE0zxjxnmecZ7PmOcz5vO5Lo9v6Xkx5tYghnsx+FRXkYSRCpi2NJ/AUuTsM75co0+Gzp1QAy6Rgd6ALCNERz3NyqPLP9r8aY+B3ZrXtzfNPZHadX2uzpL8RZGVOkkVAOmeHIQyl8Fmjq6BR7Lry0+kbib7ru3fhEqIbVebzvjAuXiZsu9jai/aFKcWHYpM3l7p8YBpi/ZHNoz+rn47kdlSmCuX2SGo26Y41ERhLWmolQiY6xr28nx9onwe5FvHEesO42Vy2kRGgMpnAuOczzIbG0liHtR4KJJep75lzsjl6GNrEFj51DtFGXHZwlnL1m15zITWUMKRplGVGoDMucZiKhHq5mX6PBdPUJL1w1dqflSBAbauPVnU3651VsDdjzu6f64quEgoBkqq41L/uQg0L/PMJMCi7YaICjCoeN7eIHlvp5Wvr99cvHOV9ZonguOqKZXpYahyX1PkXJPXVOxGQJUUaAPVqIc90hgxbxyYj622kXdLRCOpAmWARPKZzzifgfP5GtfXH9BN4iKCrpLo+r/2iXqYBdWaZVfmXu/ZGyz6LrOp0x5k0LJXdXXTt3cCqLSQ4VWMZQK4/JV7fs8Uuz9IHB2yfaS914OG1uQjdeiByWhCqn3e8hL12VEe/pofOvDpELNoRII5S4jrMLaAjQYIdLijFrhmm7uuTGjDc2vGXUlF1PZurnZqEmkjLoq0sEDLyOWwfpjrtuwE6at1SLHcV0ZqVLOeKgvITrsKPHrpZeSyzYHhg9vRFJwZOZf0VrlstD+yYRVFdx2dctqPiy4jqlBzH3ExGNVUc7tThUMlLuggeziSRtRkGZxESKsAloYmIpT1TUVpDPIjLksU2zLTynVVkOY9ZogiUhZUqZozCezsbIfLSFmq2lEucC5CmRKSesbG815va6sQVp6iqDWXE7i2QsGRh1caYZW3MC00CHoR1gjyYwjgIFArM2lYv7QXqQJsnlgDFf0wnAVpnBm6oNV6t7ksF5xzOwETxOCynbcvc+iep6aEyD1SgW/Vl1SDryPjsoe0rEtPvTkG0q/UqDJynnE+Z1x/eI8PH94j5wd9REEeBKrn2gA+9F4526Orm0jvkYK7/oYFIfYeaQcLyulJQuBRCJvAmcCJ0PCFtnurp3meu11FNR8i23oLLisQayyxV09Drz7a80P5iSI5Pk3fzlEUw79jr0dyQshIlGv7EuXCp3wnJCQL6lj1X6dtXTVpvj1YHG0G1viyBrnIIMp0AOaauY1w745SOZCVFEBZvckqrwUsZdsrLKhp9a72x+CMer2BKKsZtulwZGNB1RVAZK9//uTqhZnLaYrOswMwlY1b9LnIiEoYV4xQfIZkTDVU1vlU4zFNBT+Xhlxvq6GbERjk454+YrQ+S6L+07a/Ki5q6HhITX5KZAORcUYX6mRuQw9ebtc8z/rdpNn1dWdkw5YL2m0tFG4Bx2ao1vCaM2O+dJOyI6K1UX9KCTInQE9ajurZXycaR3x6EKbRz+Vw2Z5+MnomMj7RezHv6320i0AEQONS8nMilkDK/4WpbKRfYhS0nDhqy7Jnk7xFv3QyvixPTD7CFPFzJL36fKdPbkfnfvIdRD8l3VTAR2nYhtVGTyBBikQ12lLRbUG6IIKGOZnMveIGW2+2z5DKzPsWmGhRjpuWUXmiHqDQ7SLLNe+n5+UnDjYG1ClCgxulzgtA7KqcG8DPWSJeXHx/VmWhc4X0PZETlRWfv999ck+oe0T2jbUWG41F2/ubCrI+o2BflH1icVSr6ea+b9hQzX6pMt5zcQw0MiUh+xHgWJZt5OmPi9kbDOaMOZ/BZbh1Xx9a5jV+PoaOkVcfARC/IibSi7Y8R3gPuXWy4vkMV9ggXpYcpeuvRb91KMuCvhHQ1j8/FOY/R/LvHRXfb6II4dHdgtu7u18Z0i8SbOxBijdJxytqKgAjFzCwtcyOiCrYkImasiRMw1eL/MCA2eujG5GPXxhGVUYRBZhOSbcJNKqyXu8w7fqWN/b
TIr+UkSF+qo62kf2s0cPm2YJlbgY4VlSZyxCOXgMQ1aEqIm+ALgUaXQ7UxedtpvH7gXIdReGkjLlMnG5y76WlRsfZsOIY3T/6LJEhTceethr1n4l7COK9ey/7a3WudTPPc4ls6ERI3rFSRQod5RnlM+prfiKkX91hve2R8VzmdZwiwBKtjthT3jXqDL5uEMe86Cv+ef/dPwP0UZC1z1Ed2fqPIhtR5MSDvjUw1L53uY4ra4V+kWDjrmnkvehqFCWvdDohIchwijltdthR1FEN0huRBRprabfL1D1/exSHlO1GaS3PccfQ96PrnzMNx3sDH3nkDTLWldTeergN73JEUZp5gJ4joFFuLJ5NRDhNJ1xdnXB1RSB+AMwP8f79BJz3csdYhHpWnw14Q+/p28d9n/d1sVdutf3VQMlcnH7ORpTeiNYATutv6+/7/NaM1xat6rgd5I2o5ecIoFvNo8yPGunBPfyPAFfX/wNwsIu/oA4sRZGNkGe26X1mwyh7PKKfgvK/TerDdW35a6zY+mtp0s1vEnIuk+WBcliPggCrDPT9srahPdRyYK5RjRTcG4Xl5rmFBevBXeiV25G27RQBt2tWWY9XKiy9JU8+suSfvQ1juqe8NhpDtPSElmDKpI8lgBx6PrBbkLcEdPhkdjbdbqrk68IPpWytGPG8M7W4Wp1Cybo03AAow4+XvbW6TalNrCYUESfpEw8ePMBEj8DzNU5zwilPACWQ9ruyDLJGNMwwCucZIB2Pb1GAugS8PA7tGxohSLojsJxtUaSuth0DwgPGoN56uVved+fBm2oiaps5Aahbh9v05HPbw2cjRHo4mP6NjLfPy/K5F/RGPFnnY/ROlLadJHtpRCMuh4S0ohU/XsdEAGftz6cxIl8WWzfeeamy4ni1aYW2egC+xT/oIx176/bGYGPUeS5Ftx+D7sJz28yDMWi+mDjLPHMiRdHkPNsGOMKMmsZcCHKCrG3bExEYdSarkPS5Ix2l5lmMU6S4Biks+PFjwxEvlwKi25BZ204R4LCfkmnc2cNQqP6R/CNzNcSKZjb3lZcVL8UqRwuCxlEvBzQQ15cu/SS2IX4u10dy5cqu+cHKTgPOp2nCgwdXOOMK12lyJ1wmpJqb5asrTHle028eHQUyqs8t/0rf7FJo9dWVKTAwW9HI7s/0vTUQYPNOaQqfiUCBGpW9QH2UrwcKI96i9Pbkq+T5v0093/pCk3MfQbGkIMc+F4EL5dt+RnmPfvdttczLks9nq3/voSPvfvJhlBGznwswuTWqBmdP45RTP1cE2hvDSUecaSmMnZfFsqSJYkdhyQkz7IY4lsfbIFXimv66kY89FGv0Rh3oY8vTss32KZKqJPK60VHKzOA0GUVSU67zhBi9kY7qyhugHthu86JyljkeM08ku8vY9ybECvASSlPCNJ3AUwMaukeIyC+BOelq2XKsgJZd6ktXGfiydeUox9drey4nHfYgvAL+oae4H3BEYIASFu03Au5SL8vnrYGy+cg+Cj2g6cs6Jt8nt8p2CXl+vMG1eYYgWJ/h+s+OfESWtHxR1CCqU8/fWj6eRm2wcDw4nrPi87e/bxOQbdEnBxsjOuqJ/rxIFGHV23U1iPUwAFWWAMBJt1zWteQlleJq6SeIgAzMeQ6VUiTAKTVBbh3GsHrTouK4F7NIxiicPeDjprRX4W6BjZGHsyr7xbDXZ7iV3X6Ojkr3UYu9nvUW31jhe6FouURgAo/sUFuxLBFvY9FcgdUpJchpyAnABHDCrNGAJJNwieS7Hg5mDTCzLKO1wt7O7lDUoh8KQKj2VN2sbatOoz639pz9rmApOjMlBgdLY2yfaUAjY577KIHW0VobRTJ11x50VBav20bRGVL9uCN/GVIj2OaJ+BpFMC4FXd4ZszKqcm+H4Dw4ieTAOhkfiz4bsPGpPdHPjYjigHfn1ViDkbl6Xfqm9bE6RaVzOlx+I6V3Pl+DSzg1bxnCo+XEHgVZ+OZY4Y6e12c+FeDYk5/3VP21tff0UyNPe9JhM3QRAXq7ImVPnfWRjXWgYdNWBU8Yy/RuYi47GpZVGVTmUaSERAo4pnLqq7wiBprKrpJcDakd8845I7nhhRYJWdkLQ8tVf+mVuG78557+RcWbGEUrvIFq7y2f1Xz1e52XZZ0KoKyA4eFR8nZStze2vl0vMbaerFz55yJ74su6V8Z63qnqLB/VsuX1Z0t5kOAPiFsrq9dzCix9FEPrf7Spm33nY4IMpQNgYykca+JyxBxFBb/VyMZHxy108zxdlTSH0Cds1FpQX6F3yxoFcX+K8skYDwbOZ3P8dv3c9gZadpdHGaLy7lXInzOxBpmC80v0vidbbVxCv+LMu/bnPvLRG3GbRvHEqeXflis2uep5CCaz1nzXyuvADWMxATZMM8pfv6MN0ZRAH4jNUkCgTuAEBHxosimVaEBqE2YbkNBhBx13z5hnq9i9oWrzZLTuuu62Uk5bVu1z/lwX5c3u6dHgyzbYsJ9pasayGiDOcp6T4SWbYTzbFjnnEGxEsmYBmvJgdcGoLhaOlNMfUb5baW7nTVXkG0gs4KA4cLZb2DrWPqN92fZpnbS7F9xslSFsV9Xb8mBrQ+ZydEZLO6XUAY7QIen6vw4bRiJse+I23XAHUUakYHSy2oJWjhb2GTB4XIZDdubuENzaZLvRG7up2+LWFnjZ+HKmCYNyESb2E5SWYKUq59KGRCUiQqgGTOHEPGD7UnOvHSeV8yj2PN9oObvbfvdGu751m+B1By2VRnRIlS2H1Lsq+egY7raTX+9xsjGEnfKVR3s55XrHvAP4+QqawpYyr/UtXEF3Qta87XeeW2Rj/6kojqPMmGSigsxXKNcTEiZKYCIkEE4kwxkJwESEOQvQyiLsyERyDEkCQDJ/pBntVi8CMnQosSliGeK09VEmoFb5U5CxDpxsHQJlm+i6KUgDGFq/VIdqJPdk+NW09HNp5HJJRWqsuRla7kn0AKGeHWKjHNpnPbDQ99WT9g6ONZT6zJbDcD7LumW7h4SPAq3RKMoRRZCIJsAMfjXBtf1Nhus4r09AjfqLvebBlH1uBDRs3dnIyMyyiCAD9TgRBkCnU4uiEGEupaKA76huAJW5SVMEcz8Jv1ch+/TqzcAGH7t+yDgzelfOXA8vjzr0HYKNj0e9F7Ws3FZKRdgLTxKujkg7VfvUSMZCEYT1HXC5gtzHHalvIx/ujDwmW84jdAnguDTcODLSffZLJdSe3wLmo85nn6LDWDuunv3h7yFX5L8ab6zLfz19UjRs0YwCKLHIRqk6tAPUyAiZawZ+hyBV5aYOE2S/soXav6SKuTdMYVmC/plNZMN20abktfxqHLflYEkFulQHJDL+S4PkDZOPPkTA37+3i7tBna2BjVHUdK3+K+8ZaH2xB+sqG8ytPUdl93Xo62ALdPrvns/FMz49005RlEslZQgwbJlK+iO+L1GLn82cjXu6IVX5G09KWrwyUBZH04nS3cqv/iFWZNGnPVdm1Lnt5+dCkRK09y6t549Dyzq9hEeBTyYMfEETUdP+AY+5OuqVPSNjGickHV
6xz2FsGK23Xr8zL/S8vuvB8Frb27B7rV9msJal7OlgM+uhxSVgowdVXJJo/UnLkgAsd5S170Zljvqm36tlzZP3m051IE8sYP1s+QdOikmTWaK2mfWATKmHlv/cOTyjiIT/jBwjDzCiXXmPkn1/D+iPwIZg63WwszdidGk57sHGz4C8Dh4hf09R9EOv38RgbwGcTmCpjz1FvNhPv8nQpd7TXdHRejsanbkrGvPde2ZR+HeLqFj2CizLPANvSLfTQQUUkkpCIkZiqnMzmAhEqZ1+CZU5dAhDfpc5LMbI+nrwu/fK/W3g6A3V6hCU/QNLREaNIvdDFS3KdFn/bLCr8Mga2bA8C9Co7wTRgkgO/F4dHpQs+n5AkWxJWvEwioqQPSyMfRmx1GlNFLgcSb/OmwWPPV/bDpCmubaV/BpFevqonhlJrZXrjrcVYHIPNn7B5MXCC8OeMGj0/G1HCKJ8kslrFJnovUUFHRee7PmJKKrnzz+q0WhNXvZQFMGqEzv3c4HqkdaIRUZCQkqENAHMqawyUYOioNYMidS0duQYAoKSoCM/QVDLuwbeF0aKlv3VKvjF54XDxC2iYYdRqAIQBO01Tmvp9fuJz6PQvie/WqKlOT6szOalsgFagpPIOSEiJE5grO+WGwGJKO0IbNRdaC/UV74O67WN5+38mBrdGJB3JkbP/rLAxuWg/mdLVOZb7I0oRB1Cn/Hv6P2oo0Tvjb5H/GzR0otYdnrPV2TQPxda4+VoB44iUndBXELWW6FgCTZYv1ku6nAJg5HM5nD1yPWVNJe85GZMULYHp4QJU5lISOCyzX+akgyZUIl6lCEJ5U5Ax7ZMh6uFOK77UVRkD9CoVUaEOtVGAVL1utEiRNSn4/O3BqpRGXIoQxGcfb8igK167YdSbJpbRjfiy4OOUZ3E9VQqRIdRirGtTofmUcSDk0mPy5L9csxC1ZdQcNWWjI4Ax6i8/sTWSK9u9dOtiMWwrvUvyNcO4VAJ/9CgbFHUMnJgvTwd1Vl3AjZIe4qjI8pxKJAaXnQ0Ckd/TsbmNigMx4JXPZxwDG/hOawDij3XI4N0NEoSeSuNv6Ux2BOS/Zxp6+wRT75u9yq0y6k3hNZb0ms+WlBHOoLYrUazUqLuXBxPMWjU9Kmcs8KYJsKpRDaIZOXCNCU5nO10AjDhNJ0wg5GZyim4xWCl7TpbN34xRfLvDXbU73Qprhj4fiVGve+9W/M3z22jPvVs7WZlQtn0JdPPMqENRZCaddgGjMBGdDx6FJHZqlP7vI1atmul3fXU1RKW4dyiM/I8kEnARaundqZU4aCWkAAgy7Oj/UN8RFg/R5vm+fbf0oe2DYdgPnISAcwuwhtFr/XZUVnC3y7NkSN6BHD89CIb9/STJO8NVaXlVsCMvLTy7WOy/FnSxwdQVKLSsUdq22Yc2F1JPTDGMXEBGIWHREhA2cDLbNDFLTrAWIaDi1OM4uzpld3gYW/9bwEO/e2fm+pupj3YUKCnB8Up5/PcDke0xk+NNuAP55KFkLmulDEggy24oO7DUuQIjMp/1DnwIKyBp7bk3+fTG+CyRT9zWw7Ky5VrTf8IiEmGP5vmFliyPEfP+p1d/XMjoLCWZyvrMRrxGdX7XWiZe7BxTx+NIsChP72XNgrFhtrvnu6MdL8PrzQXbXRJ8wwiGiPjVRU2lTkZdf6G5JuIAZ2rAZQhASdT3MAFo21xvjZ7ZCGLO1aArHm3dnjFbrKUkkR8pokWYMMDD+UnJdkL5Hw+d4BD8wF81FcBRvuUepFKVDbXHNYomhF55dFvb8yjTxs1kDTKJifk01We+5UqWVq2DeHxcqO0OiwAqltCjYYI/PdRRMM/byOBo429oiizpyHAPRBViNKLHLpaNozBzNHhE6V7sHFPH4Vq53Ydq/rFK2Cj72y/XLDxKYaF1obgfMj7cNoufSA2WmMyKtGACQswNOJSr6FFNORa2RODUjX+lg8vs3K0+z7+ojqz8q3goD2TzMFqcTRpAVzKIXPdVvBYRh8C7lotZi1vm1Oj0Z8j1M0R2KgT5c0DDws02mRRACwn1vb86/tl0MdgXpCsTGqG0/NUZFj/G7SVlXUfGbBA0ZZNgaE+48sdAY6P0bejyMYiomF1b5DGpUADuAcb97STtn059zCg+sB4S8HEsmAYZQ1s3EDW7+kiWp/vswg7H009MKZDD08yBeosRi5h81znY1CVn4y6T2mHSTSyIe+Jd8vgJBKes0QLVHjn2RpxrsMPhHh835apGp8ptUmd3NLInAuosH+EKaVyZgstIkq2voWv+F60x8cyZF92EWWd44FaZwo8RuXT9DyI0miJ8mZNluVX7uWun9t0+rNZZiRKi/KMjLe0nt5LYGTkqof0uvyTzLM+LQ80tH6naerAkI0k+SgUWpYtH6MXtUlaPcanJjfem3LN3WzesYbWO63/oPI9Ahu5VtLt0R2BDQph0Yj1QyNEBHDkWQwqxl79FJ7h50Jr4U6geanhuwia08p97cllvXrxOIgJnEh1bMiLNwRcv9uM7p4ukY2boPw1uoSXm8r2uCxl+jFBjmUnbTL/X0lnhY3MjERA7gzOmPoylSEQavJESMiYkSDno6CcyMkZkK3KZ8w8IzOQOdWt1Jkl0I6SBjHKBEQAiaFnZDFncJFtBQK6YiMHRsF66g0kAFMCdDdTjRhwyTOjqa6UZA4KnIdMulc69atppC6aB61G0Nbb+j4u9VjpwrdEeSRdhu5DsmbUrbdMlCo/OiyjhZNN5Mv+J2WSZwWMzKZNqdZBVmBZgBo7s8Io0SHOspW43iU0ECGNWsBkLa6WGgAwTbLVfTTU4aNa2r6+nrWtLOCQ6wSQibJVdFG+U/kDgygDucw04qyIqMrOQgtnBpXJxC0WUcpM5g3Vt1qWrg5NFEyvFQGNIiFbkas1uhHYGGY6VDiBsGqAayf/PABwQ16yvjfeevXnTKOQ2V6B0e6uHb3KMRe0nZtHSyBMkEl6Sb0sQYdFGTRgWXkJDj+UW7G38UsnXxdrY8dH0hrVcR29LRtXqFLLRuHW5uL1bqyT9xKZd7AM7/rQulzTZ4sEZmpnMCWA6CTXWc2MKGJkOWtkzglMQKYMpixKmhPACcSEnM8FL6u7mcXYFYXTTlftD90alrUaFRReWjnEV2KACXMGctY5GjJfo4wv1fqOvFIFXTZ0P01TORp+rjwsoxm9wbB8yi6sBKnQrrd379u0W/oKfKjgCG0H9coTgCTAlRPAGZypvmlqTuqbEihliQKhzHMhKnpI8lSgIVEiU84iY0lBbTEwzJp+T6fUVuz46J1+Rke42/rzEQ3RfYw0SesJuBS5auVUkGE8rgSpm/KMzu3VqJGtI6JczxjKtREz9NAgyU0HkCiObgCYA/0RaZRoWO8I3Q+j3NPNyMQCE4pAwnpeBjE7oHFPPx1Sw3JTwN6FajUghnWvuVGk5Np21ZJOO6lVrmfoMB2oHMTGRtGXVHJWL37JJ1GJfBAAxEeDr5H0BTP+X/6lK0LK7WCzGsUw9TECNjasr0e/eyPogUY03GJSdN8dGFn53n5rbXpAL
GkyM5AF8Nl7GtGM2ruJRW/kRsOsWtZE/dyJiGf7vDeio+Ed/7ydm+GjGlTfbxqwHwIbk+0Xy7bSd6d2XcUaLYJRYE6NlMwBPt5yVtaiWkfpHmz8QumowehErTpD6s5KkJTKlTpe2Xmo+moEOO4jFzehI4rgaLv7OTWXklXUOqeCkToTtxz/X0Yk46Jy3bBpKkM9KECDy7hKZhL3LwFVM6uBM4YuMqh7I4J77vnoQmQoq2lSUOaOBO8jUqgTTdfytsZxNI/DU1TeaJy/5DJMx+fVhk3Gq84WUS5TLzbvMdAgGZZKNroRE6EHeFF5R/IftatJuEtnD9DwMrEVSWi8NeCmnaQDeTh+NMJq2S6ge7DxC6SLgYaN4qpMl8iGDel2Ydqan45ResVC91DjcyYfvdUvJvp+RJqa4tPx5jg0uwQc6yzqNvYyb0BC2KqEUfeUaJ86X0BBh5IfXoj4GtFIGevcDaA/HyPa0E0jPgBXY6G13MrY8+CXYkZDAdbL78HbeqRqFC1ZgINhCv27Hdiy9+QLyk3RKc29LzuDLpdcR2CDyp4raZIN3jYNdo5LYMGezSeq7xiQsPtcyghRP1fuCNjo6oPbuVEZCmpQVxqtDXGupb/1zBG6Bxv3dJioCC4RlUlK1A2ftDHLHqg04HEc8NxTTJcogUN1z9yG8q3SLD+ZATvkvCdlMS51Nsim57bmEVKNYhSFWuZWoCph89lxuAQZRLExiHi6hKKhDJ9+zhoP14mBOubfKllBR3kjNLoWyPjwvi/HCHCM6jyMAmFffNK/E+oDH70BygF7feQrBBplKGwtmuNJq3NUBz4iEO2zsQY2umBHAF69UfdgcAQ49FrOWUBFGZ4SsQnalGLHbntI5yNHNo51sM/IW62R/r7CPpfTNm+N7rrC1emIohugOiE06hi6iVHiDKbxqpfbJttpI8U0+r1X1m8KmI68v7ejX8LTWtp1eIy5nKRK6ngWrQ4wy0S+mXONKGylnTkjQdaMeM/Ob1dd7lSlrUM7mRkokz1znpHzDE65myw457lOSBU+M2Q9TDntsxr2knZR/iMDtQcs+/cV7EReq35WY1XMtobFl/k3PiRivlwVcXV1FXrH1pB5Q9nStNEkWvQh+4zdnEx4SYgMk68rjWQswQYjJdk9lTWyY6JqW7Jt653qSpoG0lqb9Kve0kQ1fSt7OhfG100EziwoqfkECCySHx+Fio6lt+3h2yWlVLdnl8iYzFeSCbQlDwCczaoxl45vLy8rl+hHT3cW2bhNj6BPFzhkWQcNfgSlfe5eOOm/tK9DAvvL5IdQCKgGp33XvQFSmUWuBxxZ4UYNb2/xMGqboyFs36F3l/nAs3dNtxG+HKW5lnYN9deolfzXVhm1+slMbUkr2zsrVMACUb8ZlQ8rN16agRC5EmkCZYAzzrN6nOiiaJwZPFm9oX8MXaUgDPcDQiPAMRpG0O/ecFh1taV31Ggz94bdhvPbc2po4nbseDD3FSCoQesBA3dlWEs34n3ruq2zjH4FSb2X5z6P+khv/Ow+In3eba6G1k0q83QUgMh7JSoHBmgCEK8yWSvDGqBr9d8VYhft1T/dc1276DJmzdvMd+F+KMvzvhbNivrmEbofRvkZUBH/avgt3dRYVTSPNkySdCIotbHgE7VJoRmRErg57TGOI4oV0z1ZioxlSlNrd2d8KyApOykmll0bCTvAbBnqyDmehKjpNwXYvN+mPIsXl8/I+WTO+1CD1AZrCBpf1/01DCsVDKDL13/X36NIgP71SyB174w8lNHO2GfZfjwCG5E3HTlenn/7ufaMr3v/fa0eWh3GO6/6Tz9R3H8u+U1hXYd1SoSUuJskWrhbfirqOODAjoZRRjSS74iie3sAnqheqjLX2qcJN+mDLm3t20rR2TcKNCI53Ev3YONnRLbzeSU5QuR7SMYARXCT8W5P04SJEk7lZEmgAZ9quLDeuW6DtkBIpAB/irTlFd9G+l75tG0fnLdrZav8ZiJkJCTixdDIkmH5Rz7iHS8tT9Y77EyGRtDqX7EdFZi4MpYIoMJnAsGfCxLVR89Lyd9FHPw7RFS3H8fgXR+5mGdgnpfRDa1zG8X0E0U979GnrWONakSgwMuTBxran/w5Kb6/+fTqJ8reO4zF9aI1oBG18kBYd2H0ZaX8H4O8HvZyGMlBRGsRh9F7RBJlRGnbVm86/LkcjvFOhN2G/Qio2qJ7sPFzIQNirbHwnf4oVRFkyAxvI6BXV1c4pYSpnFkgY4TmFILqNdwejZTGGvqPFOA99eQVj35O4E55r76fkuzpmbfrmqrBXyqyKLwr6enWRVRNEiDDd+rRQaeeEkFXpsjVkr4e4AaAmEzZaKGILV9RRMfei4C+Kn6ZtNrOXNFPu/mWLl+dZ9nky4ONylMxXhY0RaBqJPf2mm8j5T8678aDrmX0qdR90OQekDDaYWn1mn2xFEnKqy/uH16obR3Uw21QCHBc3xH9qxGfbd00jugso9WjKFAtZ0qgOu8jBjoRCPOA1Na1P3vnEtoPNrj+08hn3g2djYTCFX6Z6l5m7vD5T09bHC+aXYEGKBSoRSfdXSWxwCaSMwKmNGEqSNh6JM1TaX8j2jPKrx5d62DLu+F7tIxs/JQBx0jxrNHScAC9cUWnJLtjzcu23lHeni+ZdCnRLz23gTF+19oSwy2A/kwQ+a6rMFpkRY+TTzz13lkBvIk0DwG9DIYcgaLzPSQ9KnxGPI5D9Vx1Yt3mW+uB2ieKd269Sm0O6z3qzp85J6httidwkjPuNTpBFq5p/1OM3w8PUK26WIaiaETLq0R/Sj5yLXfXeaUf2jTLr/bpRjVqOdGAod6IIjXLjABmrWcCs9a/BYl6r0VUekbMeiniWp8VAGVUG0cApokwnWTYcaJUy5FrHTXmWqAqnmhay09LXR45jnoQXX2PW46pzt9YgoqjuqSXh+O0G2wQlk5qsgi/BtDReSt3QwfSj+plDeH8FBapaJlU4aCcDWFJI2i1/xQB0xMSRyFmWIEv10ssnUDAJCDjNE2yYQ4YnGeTGeMMYOaMOc+Y9VRNYJEnUZloGOoL7cjWg8vmWTKfMjHQdyIJE6OWafskzJ8+ea97SVbZixKepglEqR5xLoo5V4XcDID1ZK2yk78J8n4m2XGTqxU3yhNmDwxw2weglaDyJ8BHjTfK0esSyaiRjcy4So9wOjWQIrt9MphnABMSEWYGEqaim+SvHMAO0u25qYEvNTJQQ0vtjwGReSRQKluRd0MKk7ySkkQ0UtnGm9TgyioBZvVqSx2WTc7Oecac9YRZ4aEa3yTni6Tyu+1sSgaBC8+s+3IkPW8ELTpk+spol826NXxNU++lCjpSKV9GdktyTYsS1ciNHxKqTgRaO+vVZPKm1M/ZUHmyG541sEmo26PzVOS18M8a+Wj85XwuaVs9xYCeuMJzA4gq6wmYinxKe8jw4ZRmmS9SogqUTlXepQ4k7dqnEgF6sBsz5tnWv7xj20fLLHJT2rkIkHyW2iz9Ubfdz5zB5cgBTYe1Lk0rS33k2kZpmiov9b0LI+U3HkapHmNRImuY
JwJEO+euX04RQxxvEjSqu9sIv11k4HZUZsiby6rmbYH7hhciaReEnpqhmaap/i2zk9DonDPOeZYDktwyxrAeHCuLkGr9pNJ2vuz7NybaS6ue0y3RSK5uwq9NYzua0zxfbd8t3qIhBvt8VbKUpG+bZDTqxqrwKdWtq/twfpRfroaWiMCUQJyBKZUIRjn6nVtacrAZqnFWg1+YgU4aFUwkqxWiull2FX1uec5Fy4O7Z33KakjsJzNjzqhAo0YCjHFNbIZWqhEqQzRmnaM12i7A0dFapEB0hHViWnpc812C0q6mDL82n5ZaPFFV87fDYNFw0KBQRRbK4XzZNDoUHCtPCiiWkyJRri9kkxJ0ZYuADbQVMNT+RHcKkLJnq2j6ft8TgqRh60vlw4L8+pebzVoMrXD7SwZiqyx5EBNX41If3GQ46hDY8I19T58J8TZoO4JGffiaJiodSyaDTtNUTtlsAsw65pznAjS4O7MhpeVs8rX8o9+iP3vFtKZ07OVL0fhPnZaAw3qo8fNHgJaNIlkv0376Z5UXVXjRcjoPakQeiydNJWLAAHEqSxuziRiUPRYSiewmQmI5yKsO73aetSrkqpJ3k8ry2pJAq6Ct7FuwUf84tZ0gg8mXtj6qwXCyHRkyPa32pmp7FPY/2q/8EEEUMbPPjlKP0rC89UChN9ZS/xmUggmsAx2lQ0qUVD+mAjTkILxJ9SYAXYGUA/4i+ZZTCuM9oDxfwnsBVSq7ld2+zIAcgacbpTZ9IMMrKi8e5K/Vd8TbFu0fRhkojnv6PGjFedk07hFp56nr1UvnOul20MU9PRd4zbMoSwkB50VUow/P7udlMUYYRjZGpCHqeOngz5m8J9KM1T5Qqt85uOdXMXjAMYqQeO/MhsGjFR3997Z8lai47uLuNoe7gCkFyimhzimamUq0JYPVKyX1nVOBGcuIzVZdrXnua2l5o9fARn/fk6+nFIAQm2fPz+3K/55+Fd3vnRk/CTgGM2s0qnMbFbAAzA7pMOcaOxkBjq7/FKA7JXv4WlvunIhCfiL+ogihlmePU6/Rl/pc19Qsq1IkGAziBO6WmTPk5O7yPtrWCQzdEG0jb+VzJxY4BDYiobmnz4EYHbAdPeUEOrpvO76OmZ9SAk1NOVcFkpuHlmfZxXGup20ulYaVodqZzL+WSMOQNZBaDGUQ2fBla3mu18EvgXqgMS6/D9cqjZaw+iiFjVbYfCKPTj/VM/eyMcqPwWWiZJtzRAX01t0RGfV4cg1lU0KZGArMRYT0LxWwoQe0Wdlt3xltOGEJ622UZo3WDHO9xzIEpco+ClszS9SQiNrOrrTcjlyf1c/b0tYj8ASsG34lr2Ps5xDADOotimz0NncJCPvfbfLnWpSmyyfpMErRkTrXx+Sp37PGywz40WcsMG88LfuJlwPNN9Lnvp93m7bNfbnsM7YN7ooOz9m4Bxg/HdryOEYUh69Np8iMOc8NbLD8zvNcQnWxjIzyjJ62k4+JFbU39L6N+Jt+ipTNiO5avm8K2L3xGYWeo7TXDJ1+qsHiVnm1Im16ds5OBCIjfr1BGXlw3tutERlGGy4k4Y1RQsqT3NcDuBIDbVwdoFyiIppCMeYahKbiBgpvqN/bpEf9XNbpmnGydeKNaVh+5Ytgyt7nZd9lANkYn0UkMGyLMVjorsmNYVpR+ddk0RtMbzyj5yvfAdcjMKbkjXm0My2XSZSR3IYRGf3uNm6z+bS2kWOG80D/dM9yD3RtHS31sQKIXJunr09U0FLTBiNV4FyiHerMUdsBWrNSADYmE23cSYfnbKxd31Kcn7tXSYCZdLVNR7ZuXaubyCPJnFeb+hLa8m4jZKuBZmLT6VmE7LrM4taOrN0r6rg2f/2u6UxYhlE1pNelA8iMP0fec9c0ouY5KoNr7Tby4kY08uD35mnfj5T2Go0MonrJ2vbqMet9WdLcyJ//oM/o0s2oHNYIehm0fNlltzKEJ0N5qoinkwyFaNCXiUBceJyA02nC1WnCg6srvJvflenKEimYpoTMMqzCmCGny4mcJaoj2gvDF61gUn60zFF72LLmnCs48x6pfSelBFmRkpCSN0QDQLPR9l19F77FGMWAw05MratZAr2hz/o9OSJZ3Cqz3RPIfnaTZIk60Ot56q5hGUnw+VoA3Axvrz98u2rahCYn0gZ9HWrEBADWVmYu0+Zq3yOQEX3v69XWhT7XQDOdTPvqFN9EVTYr3qUd25IboLLXYbrf1OuzpdgD+Sg5W4VZsL+eZzSzGV/WCW6sar10UsQdI8wnoxiEfQJ7U8C6x9jfFVmgcCkPHnDsiWrsyct7zPo5rTzvQ8K3Va+qyKeyM63wEkfAloMaJcwNQJZKZ8jSxcm8Yf7spgRkFa58ypLy4nVmPc8jBkweHGg9+kPZ/P0ObJSJrKrrtyIHOctwkW2L4SFy3dLL5bwYzxsKULPt4o3xkTZfA2SR0Yo8+wik+nvZPBMdaqbfU5IlzwJo47Zb8OaADhH1QlhAkcwv4hJNwzIdw3P/1/O6rj9Hd/xqy1zWFRFQgbUfdqJ6WmxbwRIA3DbDBYAeXXEPNn7SRAU5Lq8HXobe2532tjdUQ94lXdLOMGfM8xlzWXnC6MclbfprHaYKOct+BwvFEioxDi+PDW6czMcGHFaZ3wXg2HpGf29FXexn9fwGnos+Z3fBvK069Rt6tfKUvE1+utkVQ4wMO0MqoeZcohllPw0G+k0Hs5z8iV5WbeSGmZGRQWZuiC33GuBo0Rm7eVcDBw1oAEAC0wSiHEZV7G/xUNGd+KmTdJu3aspFUhdAHIlZePVI3bM2/9Euo1vkwZUvk61Dn+cI3C3eh5MRB266P7OPCzN30TkL2izYADUdLOn3kLdGFurE43bdzqHwZdNLEciJ+pbmMyIruwyJYhATmG0kqTm1PdCOIYQH/veRjZ8FxYLkUf6lCn4kINohqsIHA+VAK865LnFtChjgLqlxVGOh4EAgym2LIKvwvIKr/3RZbJUyrMM1I31TWqvXiI60n1eyewCLN4bWaN+EonTvnBSAa1ZcIg1qwOcZnKcqPwxGZtnvpQ+BuOgGYpCh3+0wgChwht87wYIJf7KqXXHjD7NaGrMJDBlC0iEtBRBbEaQoT8vfCGyMqd950pbR6549DozlJ5LhSKaXnv8yyuOvJfR7M/r+3um4MjFYy+nbevFJEh3QeR5sLC8DxnEqKoz7yEbEs21f3WpujxFvYKGn2BmpnaYDFc0hiwBHlKdNS08dXmWz0sHtynfS3ejxXxYNG3sZUrzt0ZY+vAbMeRb8GyrLxsIoggE0D5ms1woC5Tbo4pVZLWeXIuv/kn78kETIg+jQCGjcprE8AjguyXdPZCNKe09eCwXrRp1re6vyN9EmKg9YTBByt0M/RGAmIYXPzAVsnOdZom3MRYPKBL3mgQIWYFihEZ6FY1HCXjkzZEsLmTfCBDBPJu2+X9rVKefzeVEeqarew5SPMm/DtLF63Ao+uo3yTHVGusF69swSlZRnYw8aJi2726gFMDd1cmy0ygMMC0SiOSG2bJHxVi+eyERlDcDxkRXZVLN
JqgWL5/M5ABxlvxa2vAwit8WQa7+w5Ie6qu7juEzLfq7QJo54rAG3Vh6bpjpm29GSZTlvObIRLfzy31F+H9nxmwZjQ5+E6KDyvyVQFY+NYQAZbcitKM4SCVuk0qx4b5yphc7so0QJJyJMAE4t2Trpk8E4EzATZEvqLs8mCcz9OOmEFimpa9GTmA6axDCI0ZDteon6cVflpDUN12Lb7Ot+TeVS1mftYxXJL+8dpTUPM/SKzL3bAjdr6XSRonYVy3IvlY7yKxsStccYsh15BRp1HkMxJLUdWjoqQ8RtVQFH8mrK5I2zpCO7hDJT2T1xAucMzFROEDUyCBkG4CyRCClEmRPBU8k8gSmBE0EXPFXn1dWVRuKYAM4JM6Hs46GySFIPDOhRFFrtukmXgCBJXL1ervVUNiojiEasWUt5ZXklI88F+GtZzSTO0BFxv+u8P+rL1kY0WhoEBnjuAMAofX89um/7ALeLADeHg/RdLnPBuERUAyMX5c3MyEXmkqZPpHtftfYllEm4Gt1o/TSa0GqdK+Kmiyz4JiNArXzcACFDtvAH2g6j1Fb6ZUj76OqVBrS07vv6J7PFvvY3+1gPyrTfFeHmUglFxlToqXwnLPNbq/s9dHhTr14txd8Tt93KojQsiVDQ4tkRbQn5HloNaa8c27xMZ/ejGxTkGQACYOnxkQKkAS8qaH3vaOi/iCkSA9OUMKWEieS0TwUpFWAwMKMMnaDqub4MRtoTyUqTU0k3UUKaEiZKZSfI8izPmGeWDfRSOVejpKHTThVcTWhjhlzys5JZ/yVgAqph1LqoVWCqZGmML6eoU1pPwyttv7PqGh3t5FE4uPfEovSo/oku4mIfpU7nomwBVGOgwxQKLJtcoZMzZpljoS0Lx5/ybCNcWkcgIOcZYAUMZey5ficQUjsjiBkCm1Mx1u0dZgIjgWgCeAKYkMrujWCjvCEgQEVbQcisZ0zI8RuiuHkuKAMNcNR+WXb5rAJXylt5pZJQUfKUy2drEioAe2ZZkJXrsnN5IJqEauu06oGkZ12ofBQAWD1EI5sChRZy1ElLAKa3VoBUsGGQf71nhpysLJBLb0QKbplFmeiZMKSNR1y2sM9FXqo0Vt40ndPptAS+RWW1pnFWkKgso5a+QySH/1l9BgDE5Uwe6HCyRn0TqNahAGWtpk5fEYMhclKPlSg8sDqksHYiSdREEpCWzZA5K7UUsr+/9FfdC30ZubuUDh3Etof2m+p7WqNRA9/EI/beD1E7RGhCOz6+ovCiFHJqq01q/gLxl3kk03moHBqV5Bj6upyxXAdkTF29OzkXQLysidvkqqydmxkwx5eT8jGgz0kWfb0D3vO43YjH7VADvDr7XAEooHqX26O0LZ9WqZNV4uZ6aLCYoctTYU70HAMn85s1+sJohg6o4YzueTX8fRq9sUMFylQiDjLE0mqmeaQExuRAqKTYZMKm3nuU7R3CNEnYPJeD5gBdLdMPTQBYLENe1gvX37pCZfGk28La8+M/I0AdDdV4h8lSNxxinlnb0t6n0+kFajxwzsigsrFbQkoZORNo6lfc+DxUHolINo8LfcO2CWIbQik6rgDFejBh4UtMPiOXFU/EGtnqeYj0RCsjV3kqTzf+zGmwFShVR6LfwbXWZ82dQOW025FeOjKEAvyMJoiuRit+onSbvHfjlfVai2QkSkbvljA1UE9zZMJizDb0dFBwCJXNwHQbX/tHKtLqOTXvtwp+OfOhdZfKWkdrXld54mhV3Rl5Dz4aWvlUgMMbharIzTOer8V4uvXguN8gbHlWR3PVfDTIbjFd7xODKFe5qa+v1JV6uDqJlAEwaVyOzcTmUYRndK/xqkrbl6PxwABmmDhPNUYyakItcrGit7U+FvVYhl+8IffnqtwWjYYHIwDt37G/NcLlo3oWYGwCVw8ybP/SfNBkGZp2AcW6v0wK9u7R9LyhVwM/qpdFnWg0oTV/xx/rD6BEKpZDOHE+GrERPQ5Q7QpclLfUZd9FfD/3c0e66C+nAjgGut61/Rb9bMDGPa2TNSAKLJJGHhwIARGYyjI9zsjgOhwhx16Xx8y/5tWK7tv5KqmGFgEV/j5SsgxX9uFNWw5vnPW6JQ0Xfi4UeYDA5wE0LI8+5K1zqjxfy+E8CwLaEsyRIeamGetvDzB6JchIk0azdJnoWlSLi3GBbE6VRN4yZwEuNoKQiiUQC2BrxCdq+GnerNxIxZvt66aaJo1EJDJ9xINntvYori/0si5HgI+H57r+NaytY+S94ejeVmTD6psosrWXQmfDOi4a4NDIQ62rps94HhvORRRmwJsvA5FeK2XWe8oiALbIQ9Og5eF8nq9WLq1TqinW/Ay7TZ32afi+bvPRYEmBx6U/mQIT9Z876B5sfKY06nBHwlb+HQUaUxnKICyVAZW8cwla10ladawRVYnGkQ1z3gElo3xkV1QpnH6Il1m9MeOVqdMaOa8jwNHz8TnFNIRG3kFUls8hIuc9L2Db8/SgwS7F9KsQxB8bvwugnQFCDJk+UfbWUMu+QgpXmcskQ6BM0PORDfeSTp7bSp3kxFBR9W1fCgmkKNBBMX6tn9nNypb1uBKpsSCm9lc1NEtP3L53m7QGNuwz0fP2nm97z2tk+PfwVr6UPFreKU2YEhXcZ1fVxRt4aX32TpDnvPGm8yzanikOYLvvmkqdDA8A5gBLBRuep1ZGG5WglmfdoGwchemcz8ghIH1TF3BQd8/W8V76WYGNI4Z4NAZ1Gx3ztjp3VJ6jaevzbTUIYSqAo+vsaN6PGv4MUepUQ7IqfqXzmt0FKxgxAmzPwlCa5zJHo74lOzxKB7NKp8uy5NvXTRTZqAoCMGq4KepRG4vtOg7kNG37fc1L6vOkblOs6Jnbpr19pCpY6pWtrUf/fPTd0p7t/f3eFd5olt0zoK0roFcmhmZus/frOwB0MMh6lQKGG8DOs0Q8UslX703T5FLTumllUVlXGVb+53kuJ8CVfEmHFvV384Jtvfm63DsksowGOVlnBgX7SKylpxuGjYBylI8d6vF9oZYRQdTAvBv1qS0gbuWmPmONdMdTD4jt+z6vbgM2Fx2w/Ijs6snHWso+UqY4uQMHlhfu91MZOVOSr/2uoFonjDJynjvgS606lu/5/ktAhFVYsbp4pWsjfwv6yYGNo579yGAfuX6E1gzaERp1cM3DP7sX9VcwoGm59OqhQWaspHVUgv2w6dp7YYiO7cTCMomJRGlz3QLdeM4M6Rk1Err0ZH397FFIY5DZlMDW8/H7G152cN8blzWvbi8ve2hNxj0PEdAYGStVaJrOwsAMgJh9bvQdKOc7gEt4rczfKMaaiOrJryI26vNve94MlOWVMmUvpRKnSHJGSVWw0AUrbaJpKw9A1DZ3Z2akJJMv54zKI9VoH1qitmdwr0M82LD8E4n7OQJ6i7YMa2JFHoL+tgB/Tvb36i1v4CLnwRvbqJ+tgV5S/aFypP/Ufs6Ld6P0OkdFX2+S3uvPuk8HoPu8LOuhcuP6PnUgLCqf5bN3oOD+VHbKYYJF39o0NQ87mbjmnWJ5uYkO+smBjZ8K3RS0HE
0qiapmlYtR1N5cpmwVG5SgimzgjHyFmsVVhjcdZSWyP8C2OwSuOMoBZGa0whmGZVFl4lpFWytHkMmpw8KqWScpSL7btCcl1lDMOBGBBFhw9F6RTRGVKUFu80TXg/4v3E8XhgHEcePb7g/fffp24aUjLkZAsBVBcScmbyns+ffcHPP/ucP/3Zn/Hs+XNSyvTHA04pfu93fpsPvvcD1psNufA+1AM19KH00AZtHK5qqOqWkFXZsEnCtNa2ICvf7CL8TooNqwz/4eN/i//Dx/+Qf7B+j3ebM/7zn/8j/v1Hv8eH7aNf6bnfBu99m/GbXvb/ohcaf15H95VJuoRLzRW9IVMBnY5c1rDOPWa6xsZbfLgh9jc0WpIQp8MerxLGCls8JojZkMqO9/runhRk8vYxc7c7sDsc+O3f/i2BDpsWYxTTNDAMiRQnQvBUlcNYXUiDsjA3VYWtpPhIKSLyvURdVaQ04qeBzWaNNpbbuz3WaoxVNHWF95GhHziEA9572raBkh2itPRhUxRfhfu7vRQjTQ1ohmFkGEbW6zU5K6ytSktD8bOffcw4ys+m4bgUIwDH45Gu614JEwPouk5QCwXb7QalpLUSQqSrnMiNAWMNxjkhl2WFT4rsKtpui2s3JF3LRIvAt0aponYRhYTRlpR88aXKZKUx1hEmLz4dMTH5CaUNTdtibUnODZHhOLJedYzjhFZCor2/vyOnRN02uDkTZZqIwdPUjjANfPH5p1xfvyRGkTFnNE23wihN9IGmqjnbbLHW8MnHH/N/+T/9nxHCZuDQHzkcDotT6FygKSVk0/nfs1x1hsVfRzaUElRoRjZO5a3zY+TfGqUf5LZ1XaNK0nCzecSj93+LR++8C9qiis1SCQEv9+7c21ASyvU2/tVbWimv3IMnhdBXHru4Nc3FVCkE5kNgnp/fjpK+vV0jRMaYROVhlfCedGnVzFwNyhmoDFTZ45TCaC2EVFNRFf+Vqq6pKiFa1irRKEVdOZy1NHVNU8v9bIwUEsZorDGSkOwUZi42jJPvKy0tEKUxSpeWjhhkkWMx+VPF7A5UIZlqJRxGlSMaQ8gil9UnqakA0zQyTkJ03u12jOMoJPdh5HDYc39/z+Gw53g8cDgeOBx2hOD5/d//t9huJXtpPv8zMpuzIGOgGcaJF1cv+fLqJfvjgHUVq80FziiqdoV1TlQ3hcAsz1U+H6XFo0Tb4uHRYKsGHbPkN+URlcHWDZWtcNa9+QJ8bXw3bRSl+J89+h1u/YH/48//ISnDj1bv8L/50X9Ea6rv4iV+6aGWG7P8+zssBL7uub76s19f++OXGm84nF9nkfSmCU2Ty3IlznUOT60SLSN1PNDoA7UZ0XUmJMU4gQ4C6Q0xMQxHlFYkJUVFPwZGn0lJS882ZPrBc3V1g7OGH/zwQyFxGsPQDxir0Vr4EALpW9G4VxUgmSdjTozDka5pixVzkpA1rdhuNjx+/IjDsSfEwP1uz3rdYKymrh3jFElJrM2vrq7ltOfM4XAk+FDm6MwwDChFkV+2WFtxOBy4v7+n6zqmyXN/f0+MkdtbIaDGmKjrhpubG6qSTXB2drZINo0xtG3LOI6vcDnGcaBpa7abDZOXPJa6KSm1SiZcpRQhZRQGnzOq6mg2lzSbS0y9IdqGiMNgSCERpmJHrizGNMU5NBBjQivR/IeYGCYJ1EoxEmNmtdqglOHFixe0bYtCEUPCWsPt7S0xRvb7nqk/ig31YQfKEIJnt9uB0qRYYVzF3e010yTR8vvDgcNxJCuzoDv39/coYByESOsnia0fhoHd4cDkQ4GyX/XueP3e+FqFBQ8k0bnQOw1ze1BuSDE3K2G22y2/87u/x/e+9yGbx+/TXn7I+p0P6Ze7Q3bNOj9stES6WKwTVX7Dcv/2+/AbD/V6GfE2lCIvPzpFcpafveHrQgZZ6pkiL44BqxWrtsGpM5xNbM/PaC/OadoVbdcVR0uRlldVKTYqKcSdMViVaYwQNI02hfQshYcxYEuxKEULuMosoaegBFgR6KW8L0EnfJJNkdVIYWS0tLuStE7GaUSR8UOPQsz8jHOYEqr4sEnODOPIs5fP+eKLL/jZz362JEbv7++ZhkGSaYPwx05Jo9//wfdFHVausYcTP4P+gpQ5V9Ottzx6/JR2O2FdReVqVIo0qzW68DRykb2+0thSqkjvFa5uWG+2nF8+oZ6mwi8bAV0Qo4a6+mZUie/Mcctqw//yvb/Jv/fod+mT5516+wv9NebxTSC9ZfwF4Wv85fhuhpnNswjYFHBqwqkepw4Q7jjcfcl09ylpvEXHiVW1IWewVYsttsEpCSfAOktWsN8PGG2JBu72N2QUvixw2lqy0vgYcc4srqJa5cWW11ormR4p0jY1TV1TV45YjGxyTmw3G9bdCmV1IZxCXTv6aaRuDJfVGbvdwP3uSE6Z7dmZeHoU8qkvtttKKUzZGVR1i7EVNzd3QmDTQiS11lLXNXd3d6/wAfq+53jsefTeuxyPx6VAqeua7XbLOI4FoXGFsJhxzqIQ5Ukux6y1ZLYobajqhphS4bVkfFI0VUfdblC2wSdNTJqsDUZZUo7C/0gB50whtEViTMLwN7Y4KWbaTtQg4zCCigSfCV6Mui7Oz8W/JAZevrhi7I/knNnd3tB1LXVdc+wHYor0xwP7/Y7Hj55Ia6oQRA+HA9ZVRD9xd3dLP4aFQzF7Zszx4sCCXIzTJMqW/GC8dYpinC6eM8oBD7vKN7VaTtEP59wr85s2BX4uBclQFpfz8zO+/6PfYnLn7EIiGSWOkGhMWdCWp5H+BuQ/vwnxoU0ku/u50JrPy4wOfaXQKH+XQsmgVCLHTAqBunJcXpxztjbodEbTiPttuzmjXa3pSrFxykuyxmDndFKtcdpQW4PVD06apnBjtBIeiFKzLwj4MJFiObdZTuncigRVXG4txpql+DDaEmKiH0Z8ErdR2TQkkvdUTlObCkmmzq+dN2n1ffHFF/zkJz/hD//wjxiGXq6llMkpCmKFbG68n8hkKueYfMCnSCgJu9KFLsyXgkRlwFYVq82G8ynQeI+2FrAkP4rsOoqEXFLdCkq1MIPkO8aKlP/88pIhJqaiADscelLM1FVD5Soq980Ahe/U3lMpxaN688v84rd4LH9ZcPwbMxQqKnRKkuKae5Q6ojgwTbeYfEcOA5VztO0lFjjuevpxImZD1hWHcWCYvMD/piJFOB56YsgEZRnGSYKbppH94YCfAnfhnvWqpVt1HI97pmnEaE0kF0OcVBYLLWTT+fbTSjgZSjEOIze3N6SUOA5HCVtrazZnG4bR0w+eYQhUTmzM98eBqqqIUcy4mkZQh2masKYuqIMn+EjbSGrjOI5kA23TMvQjOcHZxTlVVXN7c8vt7S11XRUljeQYrNfrZaLvum7Z2c8yzm7V0rQ1h/0eePBdsIUsZ1yFzaCMZfKRdbeh3V7i2g2TrojKga1BO9AOVbpKoiWSog4kRRfKoo3CGClqYoI8eCY/stlsiUEKgvOzM37605/iveew35OzxKq3TcXjywtSWdhCTEyjo7IGazWH44TFkjL
i4ZESx+OB6D1kmMaJlMTdNcbIMIgkV3rsc596Flm+SgZ9E/x/ujF6/XHzwjoXkXOht16vXzH70tpirMNUVSloDDc3N3z2+ec8/eivEIxCWzFRC0GUG6iHVoqc7ULS45u5N/66xlxs5NIuOzU9e9P5e+V7nLaAMpnIZrPi0XmN04+pbWTdGpqmwlXCv6jrGlNaTspoKiuhgcYKZ2P2+jCoRWYuklAxgFNZFB45ZUIhaWYeCkfZCIh6KSsKAmKwSdATpS2pIJL96OlHibIPMdL3R0Kc8MPAetVwfn5GlQw6gjUK8sM1ECOMQxCO0v4oRnwKfPDFrE44ZcEHUo6yKULIwxJ7kF7h7CjFIllFabl+XIVxDmIqnA5IWeFTxif5/cU7ZSYfl7+hFdoYXNVQNx3taoVNwulIyuJ9xNpCtq3+HIqNX2V8U2Tjz7GQ/8vxHY8lSTEhcdN5RKsjSh1o6sjaWIzbwJAJw57dbs/uvmcYRpTRxdbYkMjEmDgeRS9vjUNluN8fyFlx9eKaqjJUdcvVy5c8ujinbZoC0+9FJ65UgWathCwVq/HgPVkrnDHCRzAGhWIaR1SCbt1ycbHFOktMkSl4Qpho2xZXdcA1d/eHhQAXSoiYKkqDtm0lXyUlQojUKwlTOu5vZVdsHTc3tzRNw3a7xVrLYX84UQFk2rZlu11L0WBtIZZ52rZlv9/jnOPy0SOatiXnwPG4Z7/fYYylW61wdYtSWqK6vS+Tkphkdes1tm4JaKaQCQYMBuOqItcroWjRM2WF0g1GWbSTDzZGDwgsm2LCuYrzyxqVFfe3d+wPR9arFcMw0Pc9wQcuLy9x1jKNI6vS1nr+Qhj1Smk26xWQOexFxeNchQ9i7KaNIUyThMtND66fwRcJa0pl93qSsaJl4U75JKaeh+LhdV7GabExIxvz92ZkBCTUbbnWVTHtMkYg7aqhWa24v79fuBsvr17yyefPeOe3nmJsxeADISpq69DEpesgnNu54PjzHQuywatE2VM1zuvFGSAtsJRRaTb1Eo+aVbficlOxbpX86SpsaX1YYzFmZq48cCCUOuVEyJ+UiwKknDEfPDkGUXrEWNxoAymL4ktcXIVD4UMgxEDMiYvLC4kbIOOLl0zKomDr/chhGLi533F7f8fd/T3j2HPY3/H40SU//q0f8fjsMUZDzrNxuAytLEoZUoRpkiJYPlfJT5odSwV9EAM0HcVtFiX3nyrIkZ6RjbklXWzfhcA68+KUOLFaizWS6WS0xi6k3NMhrshZKYyWKHnrGoGvjKWOGeOieO1UDXXdfKNr5S9EsVEKxW84vukDTyGQ09956C+e3qsLa37+5je6j990LLP1Dq+8jpp7fye/o177evoLr3Rh51ba17z+DH39oqHgrYSyX3W8+Wyc/Dy/epTzLWKUQuKYEloFNJ4UBnzs0ckLCYyOHMCZhmkS7fjt/Z2QmKwhxYnohVXdth37Q4+PkSlG+mGgbrb0w8B20/Hhhx/SdY6b6yuU0my3Z+KZ4WpcVXN3e4MuC8U4CPmzqh0hSHT6er3CGQcpgpIkzLqpQIG/27Pf7dB2ZL2+YN1JOFsVI9oqhrFnHCdxv0wBoyxKaQ7HA9Y6himw399jVMQ5x7EfWK/X1HUjbpl9z83NzcIDuDh/h8vLc1Zdy+7+Hm0krtsYzTiOWCsGXbIzygzHnruX17hKdtzKWikslMaaiqQU0zCS0LTdGlN1JO2ISpONlYCrYnscUoSUCs8jFxmeNL9zmlsOhpSkzZHLghKKs+v19Uv8OLFedfR9z/vvvss4DNyUzJTbm0hzeYGfRqZplOPvjwtk7qeRyjm0MRyOYoM+705n98X53+M4loIuyKJdeuhKiwviLNc9JXXOMfJv4m7M1/Ppz+cd/VxszOm0ol4yC5fDhID3gX4ccNYtC4q1mjCNpDBhm0yKAa0rxAdF5MmUz0rNk+Yyr3z9vffLj7mX/zWPOOFrwEORtpyrgh4IAjaTEGXnHkuuhimfyaprOD/r2LSars60jRVPB1UcYcsxmeIDkQopUzgX5VhV4SOkTERUHuPQ44spXAillek9KUrswOQ9x75nt9+Jl0vfM0wjv/dXfo+PfvhDSVUt6qGchATaDwMvb2746ccf88c//TOubm4gSWzC7/z2j/je974n52Y+rJNPZFYlJTRJGbIWk8CQA6Ekmmit0aaS+bHwy9KiOjn1tpBnFsRFbMhVcfcUPkstbrRZobLBOCu8jJQIKeHMjLjIgSrkGjNGCKJN27FeRwLS1kvKMPlAbRuqqqau/zVCNgyar6p1cyGpnA6FIgIS753LyS0PP+k3PQydxfIaBGoy5MUmeP5duRGSxFJnU8g3pSIvvVzI8gHOjy/lQJp98ymkrTxr0UuFkTXMFsNKJuc5v1jp+fKbd0zla6b0kPVygy53dNkFoF59t+kvwC4HyuWqHhwP5yJpng7VfK6KhC/EAYXHEdCEYkOccDox3N/S97ekcUcOA8bWhOjwMTIOhVNRNwLfDyM5ZZSGcQr4mBhi5vbQ44Hj4BmGie32DAUcdntSjKxXa4xR6EETkpBWt6sV0zTiR8npaOuaFCNGaZq6Yr3q2Kw7jNPc3d0VsqKibTveefyYR5ePuLq6Zre748n5ORqP9RmtHCl03N9HxsljuoaUFdMUAC0thqyp2zXTeOQ4iKvmFBL+bodzjrZtmCaRxn344fdELjt5Jp1xVhGCL9bRiXE8sjnbUreOYTwy3UtLoVsLQiK7S8AYbN2Acfgpke2Kuu5o1ufk5owBR8CgXV08TDJh6kVmGyN+CmglKbWZKM6hKUoxhkIruetGH5hGMeXa3+8wJN5/55xhONIfbmnrhro2rDct5My7779D3x+5uZn46Iff57PPv+TZ/gDA4XhktepouzVffPmcyhmMbjkce4Haa4WpZQ55+fIltrKQEmG++nJe0qnNQoh7KCZOUY3ZpOt1BON1nsbrSpYHCbJjmoYHcmjQ6FaRUhAfB1OhCViVMGFPx54UHA5HUq6oQGRHrbNaWj5QHDeXIKyTDZY6XUDm5ks+2f/mV34nnzxu/p1l8X7rUMvDc4ZQHq91aRtQckbgZK4SFDFnj1aZYIppFBljwJnMqoZtp7GmvD/zMIMsiaZKFD1aPczhS8mVohAf588qBsaxZxh6bm6uubm9pT8euL275XA4st/vub2743jYc3t7jVGi0GjbjkfnWz58/3vYeo1NjhTmfBoJhpyGnvv7I/eHQKBDWUWKRzQ1lXKolHFaidV8Oml5KQXWodsNZn2BrjxKg82Rw/6A1pa2bSFD17Q4k1HJo7PDKEPrannvysj5VAZScXDVQkx1dS3GYwmy0lgUKkeRtCsFtiJkShhkQiuNIxJSIGVxKDWVxThH3baopLBVxRQV6djL+mUhfsNW3l+IYuOv/9GXTO6rZNI3arTVm26A126LWbJVDFbeeXEgdRtqVfM//ckzRN1TboFcipqcyUphjRNIMKVizZpRWuAkTiv1maylCpahHo6jPET6mIibHVmitufdiPC7yk2UT3CMUqOIdvsVXODk3c
7/np38XkNC/hzHHL5E+XKCFZUHnBZJWZJJc8ASqPJITaZSFr/z5KlGp3NyaEnJgzJkJX3uHEOJ41aQM0+PveycsyWMkTEEfmxGdt2RXEeOh8AHw5Yf3/6Qaq8LHDsXkRCT7HBsiYGOwS8LTVVVpa3isNagPoWQI8pZHjeXPNWG9CLRDwPaWJwxvBfPOe97rLE80h+w73sO/cBut2cYBmZDr5SVuGEa2Uk7XaGBkYEpjui9pprEjCiVxFDvJ8hw8fyc5q6irS11ka76Ir1s6hpVIs2NkX5vVa69lIT4OG8JlTbSb89gs6XSVnZxLhD0DpUVFoXWd2gryIa4jCopwmIUDwJjBCWJAR1lQ1AhUHD0nq587sPoCd7QuIq2VtzfT6x7y3bbMk0TV1eBs7MzrLPc3Qbe9ZdUruJHxw0785ScMqlKi1PoS/temckUvhPnzX6aGIxHacNgB2lvFLfRh0vxQVkSYygKhNdRAll45+LCWjEykkJkJofCvGE4/bvWZslgEW7GA6bnnCWlXFI/W5xzdLnj/GrDxR/uUbViTI6oj6QkKIwud/pc0EvpIKZWp0Od/P/18dWYuK8b3w4C1SeFmlxeJdDsdWR5/qqEQyDi0IQlctYZLtYVbV3QG035ebExm1Ed9XrRdPoSZe4sp0CnSDweMdPIeH3N4cVzxv2epj9gJs+Z0rwbHTmfkeJKOCLWog+GH/+B5Xx3Rd0csKaS9u1fu0R9sFqIptKqMMQsCq6UjaAPQYzJ1DK/PxR41lbUTUe33rA6O8d6yWUxJmHrDuNq1qstWhlWbYvTieQPdKs1aplV5wVIfDbkFcRo0FpLXdU0bUMo/TdbCqS6FuXZktiT5XTNvsHyvpIkx+qyKS4byBgfrvc8W5d/w6Xnz73YUMC/8y8++7W/Ttqc0wL/wT/99b/WX45fdTjgvV/9adry53R8/qs/7ZuGQY76dHQnfz/7Jk/igeG170WgL39eH19806P7ZcYADAiH/dc5elYA1ICQWN/nwZvng1fO3ParhwivHuD899V3eYy/oZGAq/KH3Z/vsfwbNmZx5lPgr/AO8JZwUIPch778+58D//zLVx/yDzum/+3fYraLn9GVECNaKXJKi0X+2+q1DLiq4uzsnKfvPiVm8f7QKrPb77GuZrM+wxrLum0lo+h4V1RtVfFl0cv+V/Z4hTZswFlD20jIoXHSojFaYbWi0mCNwRSuy4xqzY7Oy/tClDu6pAzL5hkgIY2qhEqBb1qUfqt55Oqs4W79izW1X1fovLm3mN9cHb2RDarL9vn1X3jzG84K1oeB9768YaosH3/vcXnquKAL8zNpkqQPnsjZFnCxyMxSFqQiz+xmVEFJHnrUD9BeXj44tCalVwHFGZtQJ0z1uQZ+qDtfPVNvG+qblpe/kfGwqzjpcL/274fvkSIqBywRQ8AUpCPHiRgmcgql/y4mTGVjI0DvrFLwYW6WEYK0KYYxYFzFNI5UztDUEqi2Wbeyy8wPDqKCdukT1ErMg+YDtiWFcUahTOFAzLvhlETRIU8k7Tfv/avEwWI/PiNmMc3R5Xm51AVpk/c6Q/LymNI6zMIgF4+GwsQ3Eq2ds/SyrRNlirG2XKdF+le09SdnftHTZ0BpgymcjJTkODLFz2FJeJTduzazZE5QvTwTK8t9Mu/85/aDLVwSUeNYeVhBRLz3ks5b5LlQwuoQFU0IHqU0zlWiNjkcGcahIAe6hOtNTOMoAXveL+/Ll2wTNU+mmcVPg/nY83yXP+w8FxBTPpTlY30gO56exdcv/5M999KeeUCUZpXK7A9R13V5jPTEbdVgqpaEJStxqZxbqaogAouD6BtbHW+eC36tM8Ty5A/3+dKSmh+y/Dsvj5wfXTwwaZymrS3OqlfO91de5mvfzCwbfXidvu/Z7Xbc3Nyy2+2Ic0owc5BYaZGpTF3VOCeI4KPLSzabrXxeWaP/h2vUsyMqPHhdaCP3RhLv/oIACMl0XsBPm1RKCXl4tV5x+fgxQ8pk7VBWQ4p093uUtqxXW6wWzpUmcDQZ7Wp8Bh8jdTmnakF51JL+arVYrNeVIxZEcxoHORsqMY0DwU8kbclWAv9U1vPEWuYeJShHEmt9P5UMF+/JYSTESNYPUvBfNL5VsfHf//Y7/H//6rvf6LFvUpe8qS2SSSidvoLvvUm7Llenkz9IS+IhMXB+AJzCil5n/uq/+pT/9R/9MftHa/6L/+hvQYr4aZCQH5Uhi5mMiSMNI+PxyG5/oO+PjKOEdPlpKouFEItikLjrqrZUrpKLSWkqV3Fx8WgxR7LW0bYtzaojmVWpSC1Za2JhCCvtSGoGCnWpLpMw0Hnl9n2tXaKWi9ikvyDsdFguWHhgxegCp86f17zkaa1Iwx7td2zZsUq31P4OO7wg9S/ZX39B8EdpQSXwPoulcGUgJY6HA4f9gf2+B23xMbMbB55f3/Dxsx3N6hLfH/jg/UveX2/54XtP+dH3n1LXGu8Hjsc9MUVSznTrrURNkxeHQaM1rnJs1xuMKS2MlDg732CckB3n3nzXrYvWXuNsxd3dHaUWYRiPjNNUFsPI8diz2/eMo8cHcUCdi+gpJEKSUDGtFIfDgaE/Uhezsa5tefL4ERcX56xXHVUN/SAOpavVistHl2zPzlmtO6ZCaq2bhqZtJOgry6cQsxRACUVSErBmV2fEZMSAKyqirkmmwboGV7UkZQgJbFUvvXOdxerd+wllVFFkhNIrHyElKmcZerFcPjs7wxjNdDigYuDzzz/j889fAJlj33NWVDf7/YGbmxtyzjRNizM1Nze3fPLJJ1z7a9rVWgoQFPfTPfvpwP5w5Pbujt1xAOO4v7+XSTHPPhqSHTF5yc3QSpRAp34Qr1iLF9XKqXMoPHA23kSKPLWIlz9xCXXruo733nuPy8tL1us15905l2eXJcXTUHUbtk/ep3t0yWjPyO6MpBqB2GNEleBCIRZKD16prwZhvc27yLxhjnibpfjcYvzGY2GezzviB58LsbAvnTsEpk95TnvWGCI1E9+7rPjB047LrcNahbIGlC4bwZOC5esOLIPRRlQmKaG04pOPf85P/oc/5P/9X/8R/+zL/47dfo8qnyVZEWIoLbLMkyeP2dQrtus1/8G/93f5G/+j73F5fsE617T/2T+U9frkGlHlfS43O5QNxEOhUQ7r4RCVtE3rWlodWVkwlqwyboqgDLausaaiWXU4nQnTEVtV2KoCY0v7ZN7VPVQ0uiAeKUWmsac/HAmllZmDpzKKcLlFYrFz+WqWIiMv74/iMBqJYWIaJjKKFAMpTMRpIhJ/s3blv9pQgOGNwXFvQzYQWGfWfeQcJbxHyv9C2JQTqaNHx4K35oQeb8nR44InhpH7/Y7d3Q23N9cMu1v6uytyikJ8CwGlNCH4xQholsbllEk50LQNdVUVxrv0+SWQqUIr2Xmt12s2ZxfoakVTt2zOzlltz6jbFVXTUrUrjCnQmLagxVsz5/nGKn8/6c0JAUyXnTTAqxI8OX0nPI9vNWv8quMBn5n//pAIwSnwcVJBZwlTCp5p2JOnAZ0irqro2pKqi
sKPgf544Hg84seB4dizPxyIEdr1iuE4ErJMxt5n9le3PH10RlO3rFdrVqsV97t7VqkmJ4E6Jf5deBEaFiXDzNeZzb6cs9R1hTNCEksp0bZiODVNnsPhiHOOcZyoK704evb9KEFI00hK0NQ1627FxVnk+YuX5KzwMROC8AmSsoRkmLzn7vaW42HPeiWeGeTM+dmWzWYlyAu56OEryYZpW9rVmna9Aq2pKoOrK6qmkR1XIYTmLK8XUhYWu9bUzpK0JI4eek9WlmyswKWV8AyUrYlZzRxnYowFMn64P2b4eOaLRO/pjwdijKxWK6ZJTLfSNHJ//ZKf/exnXF9fc3Z+zgfvv48xhtu7O66uXrDZbLi4uODlyxt+/sknPPv8GSDW6xLFDne3d0uhlVHElNj3A+SENZpY7NFTzjhXYbUhqJm78cDTOC0cTr/O47QAAb6ySJ/+7FT2mcrJml1du66jbVsuLi4wxiyqoaqqipRY/miFbEpSLPf/gx9CVvkVyePrNcQb7ce/9p79VcfJOVEs5zOXNZh5LZaDe/id/IBwZOYirizWqBJ9/oCgve29vXIkOSF+26VFkBXaVviYSGhc3VJHtRQbwzCQYiYqg7aWgOE4JZxPjDESy/Xx+v5dAOsiJlhkt/N8fbKgzQXHKVqmFMYJarFeb8jGoa0ja0VWkq68Xm+pXC0E9akXNNEYshauVwSs4uR1pd7TRZkT/cRw2LO7vWX0HlJiGnoaZxienBPDRIqK2djrlfdW3q8u/Lg4TfjxyDBOBB/wfmQaB3IIJ1j9149fW7HxNmTj9Iad4c0UhdSSUiKVHcQMG74KSYJSEhWTi+zQKMjZk1LEWkWOgeDHAsMmcjighlsA/Njz8U/+G4bjntvrK1FGRM9hdy+5GFYTpoHNeoOuDP1x5Px8y83NNWOR7/k0lXInkXIAHzn090L6K9VsVdWQPVkbphC52l9z9ewTyEiIlDFoV6GNQ1c1F5dPOLt8zGq94fLRE84vH1F35yTdlJTMTIzFeVBJiqZxhpw13gdJ59RjCdV6YM/PBlLf5LP57j735dNmDlnSnLSdlCRq6LIjgIStLSpp1OjReHQKPLk8Y+3WRH/B8bDjfnfPcX9g3a2Zmf5C2KtYrTSTjyWeO3Nzd08uAUlDP7Lf78icsdmuUFp2qilGQpiwtqJp22JX7gje0/cDOUeaqqZyNa60JeYiZBxHfJzoVu3iCNq27VKUiKOeXpw0nXNcbM9JgC+yR5Q4E242K0AzTp79scdoi3Y1h95zOAxEP1FXlqquWHct5+dnrFdtmcwkFfJ+d6RqKrrNms35OW0JjGu6VpJn6xplLWEcGSaPdjWT9wzjgKslZCmXQtd7UY1gDEY7xpDQuqRQhgiE5dxaY9FIOFPlLK6yhBiYpolpGgleUMGcZGeflCTPzouuV4rRizrmvQ8+pOs6cs70fY+rGn78O78nUt+7e8ZxYrVa8f3vf1+sx5WiH0emKRQEUaYyKX48q27FMI5LiyJGD8oy+QlrDWf1moRi8p5p8oSYXikQZvfP2WU0pSQeGcUF9k023K8rWB5cKPNSTIjl/C3AQg6dpmnx2oBiCoa0xAKycRG+3qw5yyXkqxTry2Lz8PX1e/yNSPF3ODI8KHrK66VSsGcNOpccEVWcOwXcLyo6sc1OOVICh5k9Y2bloC4tzm86Qgjiwqm1GGBlMK5Cu5qqWZNNQflCICuHqyNVXbM922KdJkSPT9BP4vYrh/nq688Opsu1UqS51haC6Fx4Sr3zQKCFRQpdNQ1NN4Fx2KphCh5XVcQoa9/cCgUW7kUqaI3UyWL/pUslp0p1l2JgOB7Y39+yu7thfxA/IT8OWJV4/+kFIbxHDAaSWwrRmJKgF0VSnHNcnuf27p5+kHUmxUDyIiXO6U1IwVfHtyo2vuka9bbF7NRZ7uHCF5vVHAtMWXYC4gevoaR56jlGO3iRROWEUqIJznkCIiYmYpzQcWR/d8/Viy+5fv45m1sNdPix50//xX+HypkURtqmQedIlQLr2nLsD/T7A9PxwBTEFMkZxfMvnhGiR6GKKY8jTBMheozVxBCZJr9o95NS7Psjwcsk61xFZS2VUeQ00h8niSY2ViSady/47M8UMWa25xe89/736M7eZfvoPT748CM22y2+MN/9NGJshY4BhcE6Q47+JGb4wazoTTuzX/QZ/aojv9KJfVCezER8gd5neaBAfo22EDR6iFiVWa9qzlYapyYOQ+Du9prD0LPqVpydbckxkPzESObuuKMfBpR2TJOXaOXJ4+o1YIhxYrXqePruE1brFkisNx3B94QQaJoGssYaR9/3aIproLGyADjx1zgcDiUp0pKiyNNC9JydnZdCRDJLJMMkQVbc3Nzw5ZfP6fuhmHfpsuvJ+CA22cMgCbLHcWS321PXDbZqud9LxonSmc16zZNHj2jqis16JSFf3uMqQR1Qirv7HSHJ9ZNRUK4vq4XjFEZp4aSkCKMnpIRSwtFwrkJZcQ+dvJJQu5iIWcSh1om99jhNhNGjrUPbCmtEw69chTZldzcJotF17dJG8dOIyqLGGoaBu7s7xmHEGcM777wjyhNrl8V6XpR3ux1ffPEFh8MBheLD9z5cbNpFUtqQ8yh+JMcj+/0997s9kKnrimEcJbclS+stoxhHgdanaQKlqOua9XrNi6vrpf0xm2+d3iuvoxpvGqepr/McJ06uzXKNzHkozrnl8adheeTZeGxeih/4JnJPPdxjM845L2EiiS9Geerk0flhnXzT0b99Lvh2xckpL+GBQjZbYJ+yM97+vHJ+y5/TJ/za43zDceiT9oVAENRtx/bsgvNHT5h8JCnhC63nlnjTsN6u0UYz+RGrE9pWzMEpr3sknbbMjLESSGY0rgSvzZvnh/9O3otWIuO1wvnISpOVcMgWNLioPWJWpCAxCTmrBfmS57cyt77WwiInxqHn5vqKL559wd39HTEEcop0teV4f0/wHto3G3LNOE1OsikbhyPH3T3HvpdCIwail0ykFH9N0tdfZZE67X8+ZAxkamukZZFmgmVCmxISoxU5i8xR54w1iUpNhNgTJw8pkMJA3+95efUFzz7/hPubl/T9nrHvIUz80F8Af016x2mEBIfDjpubl2ijuDw/5+UXLzj2A4OX1sh2e8Zxf+D6xRXjMLBar6jrmt1ux+7uvthAZ2KSwCmVM05La2McBrlXYhLjmBAxbcNqu+WzTz5lCp66afAhYZ2j27T040D0gdvnO26++IRkG2x3zpN33uXJO+/y+MlTHj95j7bdULcbghcfjqZbEXImxlcRjNlAKH3DqvO7GvPO6/TfcmOWXiIP0KOQjwJpGrBhorOKc1tx4da0euDZJ59yf/cSpRRn6zXWVtzd3S4FAUC36rBVw8vrW764esEUoV1tuL3vOfQjnsT2vOPycgN4NtsN2ggitV53NM2alBTOaA79DmOLY57VOOuwzkn/MyaUKq0W56jbCh88h8NBFsuqJhx7qqoiBPGe6LqOp0+fMgwj+/2x5IREhnEixog2FlTg5e0tdV1zeXlJP4z0/QGI
WJNZbdc8fvSIzXqFNVI8WVdxtj1jmgb2hwO7YSABTzcbzi8v5firavkUZqfBFCLjNDD5JAtcXaGNJWfhXqQI0+jxkycjZkqmElJmP/Ro7XDNSsy+XLXwkohS+MOrrpk4WURz16JzJvgREGQIYOr7pT8c5/CzlDgeew7HI4f9gcPxWAq/lucvnkuR4hzTNPHy5hZbFu1hkHMRQlgSMZ2zNE1NTOIgCUI4VVERKBsEhOyaQiTxgE7MyNmMEMLDTvbUJfT1Me90Z5TEWsvjx4/JefbckNya8/NzyWUp52KZE4154w5eK9m96rnUOGlFzFytV9o/peCYu6y/PkxjPooyTjgK8+vOxGENJUSsSFnfgn7PfIdf9qhnvsHM41MYlLFUdcvZ+SVP3xN0Di0o1diL1KvpWjbnW7Q1TFNPf7jF1jW6FMKczqWvFaaSz2LR1lLliFJ6QZofyqeH96MLYVuX/LAQI0yJaepLewPGUUjVOmfi1DMNPZXSBD+Rkydl8WDJqlSTSpVslYRKCVIkeikUxqEXt9AcyRGxHCCXDsBrnyHzfC3PFaaRw+6eu5uX7A8H2exFcWRNwfPg8/L141sWG28mHX3bcQpTphCIeUDlSFU5lMqEELBai1+8FiLgbIFrsmfqb4lTz2F/z8urL/ny2afc3rygP9yTwijmWTlQAylH3EmvL04jL6+umaaJ87MzHl0+4uOff7wEXq3XG5qm5e72nuPxiDGGrltTuZppDITiUhljIue4XNR6Jn4mxORIG0Avz7nZrLnb3XEYJrJSjLsjVVVx+eiMY3/k5curBXJXWtOsI3n0PP/smpvnP+UPsyYry5On7/PjH/8VPvjwB2zOH6FjgKgw9Rlaua9MkL9ZrsbJZ/xAXSr/ftgN6JMdWeU0yU/oHGidplYZFUf+7Kd/SBx3GDLKSNT7brcj+sTUD1ycn/H+++/z+bMv+fL5Mw692F2PQYFJvLy+QWvPo/OKuja4SvPo8ZaUB8ahX9QbMSasqelWa6YwFJKYXpwfU5LC1yi9QOGVNWirCuwr6NVxv+N4HJimif3+wO5+zh4xhJA4v7hkHCeGYcRVChUjCdieSwujqmqJYc87MIauxGBfnJ9ztlkLiTVEurbiyeNHhOD58ssruVbamma15uLyQlo5zlF3ovmNMZCQ3vk4DuwPRzROEjOtRNn7yROz2L73/UT0YKwrvI6AUYGma1hvznHtqihWbLF6jkuxoZUiIYuqqGcKypaE/U6O1HVN13XEEFB1LcqvsihbaxnHkZc3t0w+MIUASuPqGmMM+2GHKQjnDLNrpRcEoW0bum5FSonRe7q2wRjLbrdjf7/Dz+0Q6zCmIqcsbbD9ToyQ1AO8/TphdLE9D+GthcZc4C9OoUYK15yluFutVnRdx2q1euBopHRyjypZtKzGlLZTVjM2MPfkhZ+h1cxae0AMfhPI5S8aBVyRv8/VRl4syBbSoX7L4T34l+iF6/Ht3kt+IBwkgGJbbowgF2fnPI4KpStiyoyj5zq+IJGwVUPVrqjqijo2wneqWml7a/1KDM1Cji426vqV60Z4I7EgG8xI/mkRqQvjMGcm7+knUcWlOBGnAR8yMQT86FApkXyP7/c4ZYl+LGo6UbaBWV4XhGpgrBZPoEzx+cgPSEmQuS2GgEq5NOTmITO1KsWhQiIJprHneNgzHA7S4lMZqzLaFe7gNxi/ljYK8IaiNBfpoJBnFBC8ZxwOqHhkHI7St4uJqqp45513cCYT47i8ueE4cL+75fDyCz75+Z/y5bPPmMYjzkDXWCwjh/4GkieEgRQSVtVM+hwc+Gni+ZdXkOH9976Hqyv+5R/8IXXd0HRbqsqxWnXkDMMwAYoQMiBE0eNxICVwruxefVguMO0UOakid9RFomlYrVqcFRfI28OA6daE4HHGcHZ+jq4q7q/umIKEN6UMbV1R2YSP1wx94BAFxXD1ij/bPef5sz9jc/aI7fkjfvDRj/jx7/w+AYdxq6W3mYoES53Awac7jrd8SCc/ea0F8y0++jc/9kTeycOOK07CkCYFRn/grn+BT3dURrG6OOP29pr7/T1og588GsPjx48Yh54/+ZM/YbeTECOUwKT72x3DMHHsxd465cxq3XB+sQYVMRpSlH3GNE1oXWNMQ/BeVBKKkpHgIeXF7KupanKWXXn0GuMUxpplwRjHsUDmK7puxapbc3d3x/EoyazGya6qHkY+/+IZ/TDgqrokOkqAmY+RqqmwyaKVsOK7tmMajtzf3TH0Pe+++w7ej9zcXCMtoQ1utaXdnAv3AoWtapStyglOjMPE6Cf2h56cM3VTSz6Ckn52zGB0FATAR8hWbMajQNlzLkvVrcT2OGWmaZBechaFg1KZmIWIGeMsH5esiRQEdrVGczze8Pnnn0shj6Bb88LsfeD+fsfxeFxSWuu6ZrPZYLVBRXh5dcU0eR4/fkzdtkv7o+s6MdtC7kE/247niDGauq7QIRBCXFAB52T680nkuJRjOeVewEPbdy7ihVSsvnL7nF4Hc9skRrFmn4uMmVCsy4ZrLpoEEdGCrJWEUW0MWallkX5omJQ7tBQfrxT1f05Fxutj8VUkP/AKXvn5m7Vzr1rAv9ZH+RZDzbsbCtJhNFpbqlo2FkpLnk5m4OLyMTFFXNuwObug6RpyDhzuNKaqZCOp4ZVqQwkfZFYR6fmaicKjilE/WCjwYKOwvE+QvJUYGMaB+8MgRoJxKq3HCGiUMuLEG0bidCD5iv64l7aKca/xJV6V7ctcFghhElRRy3OphKgpc34wmn3LOZydtHNKxDDhrJECNwViDMUM79fA2ZADOFmq3tK/lAyZ+anFu0Ig84hKE0RPCCP3Ny958eXnvHj2M+5ur9jt9xwPB9abDb/zO7/D3/k7f4c0TozTyPF45PPPPuPFZ5/Q314LuXM8oP0koU4ve6apR+cobNwIOSTaTctZew5HSo+24vz8kpQTf/qnf8pqLYTDiwvpvV+9uOLu7haUYrPZYIwp7FuPNYZAljyDmApUJou6SAKFLEgW2F26RInd/R3744GsFTFIv+3Je49p6ob7uzuIZceSwFaGdbvifn/N/e7lAnOjDJVxdE2FSSO7q2c8+/hP+JOf/DP+2T/5x/zN//nf4+n7P2C7vUThMNqCrogzA7z0ECnEswfX0hOjc/WwT4IC1eZvf7vPE8sptJooOxogqgfPgxQzFodShpAiPok65N33vgfhyDCODCEyxQAkpjHw6c0t3k8iO80QgrziOHgmHxnGyDBmxgF+9NFjfvDRh1TGoknEaWLsPVVl2J5tefLkXYytiv35hCITxOhiIUk5I1H0RiuC9xzHAW2NBK8VldI0ThyPR1arDZvNltWqo24adrsdVX1kfxjEodM5Li4vqQ5H6X8W/4zZHdQoTVU5rDFMw8D+/p4wjjhnubw4E85KkhTI9959iraWAVHM1E1D03VCMBtGQgyQ80KSTUmsj6uqLoiLhEtpW6GUFBE5ZVnotCaHQNt0dN0G62qSDySVsa4mpkk2kEoV8mjATyPjNCEyR1UyKUaGfkAhDokvr19
ycyPhY9vzM7q2xRpLTIGXVy95/vIlt7f3DMOR41HQP+8DGOjWG0YfcePIzf2e29tblLa0rZDojLF4HzBKMU2e637geBjkPKAgZSpXlVC40mcw0toLWYh8MpE+BLLNclm13BuazCyFVYWkx8JDSjGiUFTGLomkWkEKE8EPpNihVUNVOWm3qnIYBY5X2oJp0MpI7lXOZBXJWYLysprtv19rjqi5iM8FbX342dxO+Lbj2/zejLyw/EqZQ/Ic8aCE7JllxyzzUsE4i+Q75ozPELUmlve/uCLot+S/vHFyyg+0jwxZC/JlK0vTNnRZYWyDjwlbO1KOMEmMu/jgSNFZNxJQKEjnQ7sKEKsCrciI344mYuJQ/GY80VYPrrSpIE8nXkhaZVSO+PHI2O857O/ww0jsR7z3TJMU/1JzRVTyJH8kDQ37+3umccDZCmtrXnmzczsFRUCTsiVmS8yOnBQaAzkxTIkpQsiKhCnF38OsrTIoZD4yKPmMkuZ4OAIJUkClJNYR/JqKja98rG8oOHIyaGVhJnnmhMoexYSJR/a3L/js4z/h808/Zjje4VQk7W5RhwN5v+fLL/+U+8/+iA/PLefn53zy6afcXF+z3++Z+h6GHmsMnU5c7W+IRnYD66ajPx65fXlL3dRcXjxFWVt2xnJDP758REqZT3/+KTF4cIaPfvRDhn7g5voFd7e3pJQ4OztD5cxxv196uEopVq3A00ZrVm0nqaBJJtLgPc5aYgg0Zde0EHuCsPBzDLR1jQqB2+fPGUrMuNGas+2WtuuIPuIHqOw50ySV7vn5GSY5br68WibgTOZse8bh+nP+yf/r/8oPfut3+dFv/VXe/+C3aepzjlPCVp1wOnJmih7rZPeUfFzgtTwXHKqYulAmNgpk+y0KjvlqSKr8zrLDUcV6WEZMkEkYVYGuCFHTNjVnq8ecmYFpuMMPkZfXO/rpSNUYZj+Bum4xhdDZ6Ipj7xmHwDBNxATj5NEKtlvNdrtis1pjTE1/uCdHj8ZydnbB4ydPqFpRUFS1Io6FV5RLgVzgea1Y3PSqAv0fegl6kz6+o6pNeU+KF1dXfPFlZLPZsNlsODuvWG0U/ThJW8IYlILgZafgnBP5XYokn8hBo5yodmpbsW5amqai61qM0iiV6VpJrc1RUbU1bVuxXndUbUPygcNuJ34wSdDC7OfcHSMTmBYFj0+ZSmtSVEw+EFPGak3KEW3EMEtaLVF2c1Zszeu5BRJDkYaX6PaY0CUhN4VEmALjMC6thPX6jIuLx4uFegiRQ9/TD0d2hyMxwtnFBek60w8jZ+cXGCUk7KwU64tL9s+ecXu/J6HFgMlaQUq0IeTAcOxFtoz0xFVMWG2JSlpRprQqUpLpVSOZI6EYPQmyoWQnmAGStI6YoX1NzPGV1E1nrKhHJi/IhjFYpWmspWoFtVg1NVYr/DiKm6N1Qg5kDtaSbKaMI2cNMaJUAKxIjZU5aUPKn9la8PTum6W2v/r45s8jGIR69RvzKLyE5d7PsiHVSgoNWdhEyulzJipNVIqUwGqFTvMi+qZXfv2b0oYR3od8jilnUcJK2qN8tSWGIhlyCiyeE4WUa7TGI202lCoBfg+BYykrQfkQsnaaevz+hugnVAocY8vxsF94G+o1HySVBR3o97fsd1fcXb8kDgE1Zmlb+EhIUZKIoycnQT0IR4bhIG3glErmlrRtMoJUaAxoQ1aOrGuybsk6Ekt3QVuFz1aiH5QlKU1CxBlL0YJCozFKxBkqixxX61n1BRDRJVflm4xvzdn4yndeLy0zaFvIMURJ5AyBFHqc9vzsz/6AP/tX/5Ld7XPidKSuDOM48eLZZ7x8+RIoUGYK/OSf/3copdjtdgsh8P7mhrOu4/LxI/7oj/6I/W7H9773Pay10rMfRpyr6NoVSmmuX77k5THAEznWw+HAs2fPyDlzcXHBRx99tOj+b26uca7m7OxsYb2nJCTPWW3gSzqkcw5figRX1yLxywU1OHFUWzgdBTasqwprDOMwsN/vF1h2hlmHYeBwPJZiVbJa6k1D5Sr645HghdQ2SwgrZ1ltNnz54gX/5NmX/ME//wn/1l/7W/zeX/0bnD/+gBxlJ0qGujLEHBmnEatsqWYLNFtu+FcmDJUKTPstsY25Z8sDSWzeJM27LrWgHSLlrZSmrhsq49FpYvKej3/2MdMwsDrbgI4Y60jOcHd7X5wlAzErQowcB0lTDSEt+Rd1pVmtpCDc7e5Y1RqUZbte8YMffMRq1bE77kRlUlV4rTgeDoyTkPaqpqYqTpxaaw7HI6HIiZXWhHFiKoFkQip1i5ri6uqKm5sbLi8vy+K8JpQd8iylBIHeD4fD4jY6//56fS7+HcOAAlarFXVd4b2EqRltca4mkalaad1UVQUx4sdRoFgfismYgiy7Fh88tmpkF1/g1gxMk2cYxpLqKkTKupHXzDkzjiNKR9nlqICy0iLw00SYRAJHlqyP+T3OkO7Z2dlyaWitefHihchXtWa1XmOUwvuAtRXvv/8B97s7rq+v2W63KKVo2obK1hwHz/39jt3ugNaG8/OLhf8BEEOgj4IYxaxEHdC+6n0xjuPC8bDlczUxQYiL6galGKfAOIk3AfO5UoakMhqLQTNnoxilqWyRD9ri4mqEKKqtZbvdYowoX6S1EhiHgXZll89gJk0bLYVajAFtJGQtE5eb6gSv4JeCHf8cxpsAcFFpCOtEl00NMzmX18iKfJuyp/BCTk7MzD2QmHpBKJWGylisNhyaGm80yoigOEzSxhiPB7ylFAwPMtzloJS8h5wC0Y/0hz1hGtA5URtIQYqYnCOJ+MqJsEqhUsYPA4fbO+6urgiDx3hFiokpBuFppURMnpwCOY7kqWY4HISY+YazIleF5HxRXH8lrE2u14R8X0jTYXEvXvhJ5bPIZeOkjCnqMyu5T7XI0yUUMOCq6hWS6deNb8fZ4KvFxVeQDYWkQkaPNgqfPCr1aDXy+ac/5V/9wT/j+vmnVCZy3N1wddjz8sUtYz+hlGK9WvPo0SWPnjzhsJO2yg8++oj7+3tub25Yr9b4aeLjjz9GlVaH1prD4cDhcJBAo05SKe7u7gghFP26aPCvrgQZmIuHw+HAp59+ys3NDdvtGevVFmNM8QoQguB6vQbE/KWqZPLt+56c0iKNnAmsSsmkkrNUxTMi4pxboNKZhR5CwFqRWK5WK0IoHgXjSMqZKQTOz884Pz/n+voGhST0pbknrjT39/fc73YEMkoZXnz2Of/g2f+dT372MX/jb/8dHr//fbYX74CqxDhMaUJmKYjE9MYU//sHgFaVK/fBsOW7mNXyyf32QJCSqkT6gsdxz7B/znD1DG00l48uGcPA7n6PcRXZCw9AlwV+PA7AbJyUy+ecSIhSZRwH9vt7tuvHNG1DU3V89P33ePz4EUplXONQSrG/v6eqag77g1iPl2jmWZI472ghMY7Cs2jqUgQOA/f3u8UIzpeCUCvN1dVLYeAr4Z1UVVX4CX65DmZC4XKWUiJME3UJfquriqquUUaRvbxLUzmaqiYB67MzVus12hjGfmC/PxT778
SqWxFi4vbmFmMsVbvicDyijJWMBeMgRsbg8SHSOjEOCyHQ2eIVESey0jhtgPLvpInB46eJXFoHc9E1k2tnD5K+70VqiswfwzCICqBEr2/W4gSaZ7O8FNls1sQQpJViHYfdkdv7PYdDTyaz3YqF9H6/pz8ei8tqvyBHwT8UdMASqBeKo+p8r4LEYVRKo51bUI2c87KBUOphqc/5gQyrtJH3XYLZgIWLcUowHceRunaiQCrzRMoP0li1HIsClcWleJpwNhRPBXnNfCJvXG7J7wrE+A2PU58lpZZ4uZKi/at4ghTIvyRq6wxJ5TJ3SkrxcDzgWrCVXMO1cwWhC/hhgBTkc/Aj4yBIVKotWT8slzondFZiJlloAtYoslGYbAq6Ekk5Lu/p9MMyWaNiwh8H9jd33F1d44cR5aWVGQvXQ2oDsXLPYWTSkTgNC8n+qydWXkWXdmzbtqxWK2IS5YkBrKV4Aani/ZGXz0ROofT2slIyT1QNrl1Rd2ui7okhomJEK0dVNkHfZPzKbZSvFB8I8SXmiDWW4EcqHYnDkZ/+yR9wc/UMP+y4vbvi+ZefkCZPU51xvtlycXHB+dmZEL18QMXEplthlSaHyPlmi9GaMSV8FrlbXVCFvu8l6a6ul91iSkm8ElJVTmZaCpSLiwtub2/57LPPyDnTdR1d1xJC4P7+funZzlK2eeKZ/zSNkApjjMW4h8I8f5DNzUXIPPmcGg/Nz922LU3ToLUWtUXZEfoQONtuWa/W9MceciaGuBB7jBYr7cNuTz+MdN0GHzzdeo2uNJ99/Efc3j3ngx/+Nn/r3/27vPvBj4hJobVDWSdOknOfo6AMemELyY2h5yv3RM3/y468PFd+ZSJOssUu3gQJHyaUn2i7jnXt+PhnPyWpyMXlObvDgX4/ElPCWpF9hSASUq3FG8PYVJIkJeioWzWcn5/RNBVaKb73wfs8urykbhshV0Xhsjjr6Mcj3hcSYYb9/rAop0CIkta6YjV+oKoq2laumf7YL0qF+TowxtBVNcdhXJKEp7LA5pypaylmhmFYCtumaYTMqGRytFbUG9oYUJm66OK1Fr6B1Ybt2Rn1akXynuOxX+SUYpEdF2fSnIUkPYaEMpa6BldL62GaAtpIXkeIcbmvh/4ItsZWNbkoTTLiUZBiub61lrZAOU/TNC0Ez/lanwvw/X7P/f09Nzc3bLZbqqI0sdZyPATubu+4v7+nazuOhz3jOJJC5OrqmmM/oZS0TqqqYr/fs9vtaMpzny5gs7RwKf6UWr4u/Ag15xYpjJYo7RgiSomMMcZI2B9l0Sq7YRWLD4aCqqpoauG/pJTIMUJ5L/rkfjk1C1vks/NOUovV9SynzTEwjT26P2LqFVkHsoqgHcui8K9RhfHWoiE/cCBmEuVcgM3miL90waHmDVNhi+TZ+CwTpoHDfk+dMnXKRB+BRPITw9ijJo2rLNooxn5Ho4SLlHP36hSYo7TfSgtav8aTme+BGMNDsXHyc40Wo7MQmQ4D474nTBNVMQgRnspsFJZLflSCGMgpYHiQm7/p3Bqjl/lpvVoJum40OidM4bDMirxXzrUqsmFV2ojFbGy13rK56LHDQPABN/RoBZvtmrZ+s1fH6+PbFRuvFRZvMrkRGMagjCs3kqarGz77/KfcvXjO/csr7q6+wOnAB0+eCrzvtpyfXS5thHEcZYJPmbpyXL+4Eib+06coBau25pNPPuGdd94pZj77h4lGPZjvCJpgqWxVjldzeXkJwLNnzxiGYXEE7LqO/X6P0dUCA6/X68UwaN557nY7Li4u0FozFGns/LMZtRADKLsUGKdugnMxMjPWZwOjw0HsnKdJ2Pxt17Fer5mmievr6+WYlFI0TbPsjkMIrLsVlXJMQ49dRRrnOPoDh5ue//7FZ7z88nP+nX//7/FbP/59tFuTiWhbyw4OtcTCn/Z+Xwdtf+WRl/8tu8V5OhCSYSSkiVpBt2pQhwnvBz766IcM48CXL77g9v6epupQCnyI9MPIMAyYqpXIbWVKWNisKshsNyvWmw7nDOcXWx49ucRVhuPhIPI7oxhHITYejgd8DBgnssRpEmte52RXmpBrKpHRRjNOo/ijxIgyGqsdKgrvpXLy+3c312ht0bZimsJiez8rELz33N3dFemmWFmfn224ONvStI0UuUSGQVwrV+sVdSPXjNaabr0RPkFIDMeB3W6HKBwcx8NAf9yVYkZ8P4TwVtweC3Q9F8xt04l9tp+kJZQSkx8F2k8aPyQiI9o6jBO3S10KDVMmRj/5pS00L6zAK4VH34sfyWq1gpzZ7XZcX1/x+eef8+LL56zWMkHWdc1qteKw27NZr2laiirsyP39PX3fS2ZReW5VIs198KDMEvI2o5unplvzfTlflXNc/f1uT0xeyHI5CzkPScl0riJm6ZEbjRj1VTJfTNNEmNstRpGjFA8xKYxpluC1+c+8QZmJp9JODaJIyEcmV1OHDZhKkA0SKetynf9rC2gs42ElmTkWEVRcNmq/utupWpD4Uk+So5ilRT9x8+KI0tcYY9Exc9zv2B/3hOilNZIDioiLl6QwyfV9yrlIUThBqmzFcsaPI8PQo3PGWbkmUk4k4sKumYczhlo7HAabNDZJ6aKSEJITSYQWQRGiJ+UJTRDu0eRJZS1505gxlAXBK3+W86IUMYmaUgq8N5w5rUGLB42tatrNlvMx0oVI8F5a/SnRdS1V9c3KiF9KjXL6Jt+EbOSksKbBkolZoJvpMPL8sy8YdpKY13QdH33vBwzDwP3tEY1A2Xd3d7z77rvkDMPQk2Jiveq4vb7BWcs0jez2Ox49eoRzjg8++IA//MM/XNoUwzAscG7XdRyGHm0LL0HxCsTrvadpGh4/fszt7S1a6WWH+ejRo5J3MtI0DcMg+Srzjmr2D5gLjbn/Oz++ruuFVzH3iadiiT23debJbyaBzhNS13Ws1tIu6vueWCZupRSrrsNoLSqNYkajjSHHyLrr0Dmxu3vJGCdc26Cz5uM//glD7zneH/j93/+bVKtz0I6MYfDT0sPPM6kCsRd+yCNYGivf8CJZRO4PV4WCB+F9+W7OqOLOl2IspESklxkmnLUMx56rl9d4nzg/f0ROcHN9I+2LfhAfFi2+JyFGQswEn9HO0HYdSmUmP7Bdb4XzME00tmEcRLqpnUguQ4ykKEqjylWlt57wkyfFjNaGYRix1vH06bsi0yw76zSOVM6xWq/JKXFzcyOJrihCylRG0XUr1muz7PhnL48Z9m/blsvLS7quxTmzcHVd5aQwSLIzsc5hXQU6YKuK1XqNbRqmceJwOBCCIIqHw5GhHwlhLmAzdd2IM2iMuKoWM7ogpNC5+J2LteA9k5nQtS47qNJjVw8tHyG/lyTVmIgBxl42C6fclJkrMctAQwi88847PH33XYZh4GeffcbxuGccRqqqYrPZ0Pc9RivZgBzFtVAMwxR1VdHvD+x2O6Zp4rAXBMSWDUoIgUSkL8z+2dFz3hjMxXpK4mvjvRf1WOnn+zEQYhJ+lFKAKMvathY75xjEZ8BobOlXV5VjVbxNQowM/UAsZ
kfCLxLX0NMNiC7FUM6g6pK0mwJZeXIQLoy1AYzsvpV+gOITWSyqXwPTv1tL8m/ep/nWHZ2ZLzYneKt5jpaFUBXPoNnunHl+Wn797fPR3J5VShyoVeHMpRjIwdPv99zc3uFjmaMmWUBDkkIDJVwZrRJh05ELeTSGB+lrSuJiPVuQZ8rnPgj515gaH0umiiqo2Mkx5zRT8HU5DxBDIiTZUPoYFp+OnCPWimdPThJ5kPK8sf8qQVZQb1PQXjEck+tNCVeEUIpi/UqI2qxQTKpwarJCaYurKuHTNQ0qRGzdgLHCWaxrTOX4JuNbczZ+4SgfbI6ZaZxojCX0B95/8g7fe/oBf/TiC955/C5dbfn0ky+FZBUzwV/JTagNfT9wf3dH27a8/977rNZrUkx8/tnnPHnnCbvdnh/96IfknDkcDmw2G5qmWWDaeRGXgJ3IEAboBLK/ublZdjQffvjhklUwy9wALi4u8N7T9/0Cv86BatM0LUjHq8mOeWmdNE2zFBUzue50gRH3UXnecRyXQqZpGmnPBOnZHg6HpcgYhoH33nuP9XrNy5cvCZMsxt57gp9o6gofJ4bBk1RA5cjh7o6sHavtJcP9S/4//9X/g5svvuCv/0/+Xc6ffoCtWlZVw2HsUbYSAmYu/ZXSp9YL4vFtio2Z//HA+VAl4+WBujWjHMU5stxUKXtCmOi6huF+zzhMsjAmMZwSzoOoD25ub3n85CnXt/eyQy95BN265el77/HBB4/oVi1aS16BdUbkmFNfEKXAcRg4HA7sdzt2t3dF0iwmQApOyJ+WDAy9EHsTJYxMaaq6LZ/RJAXMnPjrKnKGvh/4/PNnBVlQS7bGvDBst1vati3ulHK95ORRCoyzSzGwWq+o2gZjLbapaZqWqusY+oHd/Y6+F5Ql+Mjd3T3GWIZBFtzN5oxp8tzvD2jnWGlDmAJ1Vri6YdW2ZOD25obdoadqWrr1hqZriGRi9ChdoQzkHCEV46oSf55CYBpFon567R8OB8kNmttKJ34T/TDw2aef8vLlS87ONozTWO4hmfxjcWhVWc7RFDK73Z5xmhbb75l4PXt7BO8Fwp4C++PAVAr+GQmaW5bW2gUpbNuWYZT73RrD+dkZ++NRkkGBytnSIhL1VooRrGaz6mhXK5E998eSiZFQUUyVtKmo25qz8zPqMiHP7bOmaUW2PI6M04hCs16nRRGQ40SYeky9ln49s+F1uT9PuYpvQJi/i/Gw0fjF422tnbcVBWpuI6mZvSXqB60elH8P8+qbS5m3PjfIaTJqmb10zoRRSJz7+1v29/dFAp2QTD4hcuYsbcKcIymMjGdrwjiQQiSetC2EJClIW4KSYJtBG7Rz6KrBpzzT0eRcnvy+IIsa6yrqtqVuOyFilol3GEdJakXQtcoZnI5YnYlYUhbS5yudndeuAV3i301B05QSkuzszbG4m57MRTOHbvl8NFhnqZuauqnJk8fqIi4ofMOm+XVwNso7+7qLWynIKVK7Cj95nM1UWno771xecPv4CYfdLbfHnovLC4ZpZBp7XF0taMD+sOfd99/j/Pyc9XrNv/gX/4LtVmxkx2lceBoAX3zxBSEI675pGi4vL6nrmsNBdj7DOOLW1XLym6YhpcT5+bkoPw6H5T1pram7B0RitidPKS2eG6e9V80DOgEPvVlfJrzNZsNut0MpkU6t12vpQRcex0yeM8ZwcXGxFCHTNIlcMAoZ0SjNb/3wRxhjuL+7ZzgKohG8ZxpHNpsNSUcJv0oebTQhQvQZ1xgcBh0m9vsv+ef/7T/i5Ysv+Jv/zr/PX/n9/zEpQuccU5bwHoHbZsLWd0EKffUCej0hMC0XeWTyE0F5Gi0Xe1WJXfhwv8MXsmNMifPLCz75+c+L9bHhfrdnvb0gv7jh5qZnCPCuAmM0Sstu0TkhIpqiolAqczge2R/F4nryEoC0WFErqMsOePZ7OD8/BwWHmyM+JIbxAaKfYfoZHp/dIttWUk4fPXpo880L8KnL5JwFYq3h/GzNdrPGGI2fJtCa9aajXXUoK8TOpm1xtcOHyP3dgd1uL22ZKbLf7zkcDrTtSsiyiYUT5GNkfXYmKo4sQWjDbgfHAeVqlLZsNhva1bpA/rKAUoIOtXNkZTG2xhlRYPhpYirk2JmHMqMZs6JrRvgA1us1fd9zfXuLMYbtdsu+eOw46zgc9tSVE2XQOHJ5ccF6veaP/vindJ20Wu+ub9jv98KxurtbdmUhhJKcKtqBuVCEh8JRa0nknc95SonVuiXtQulZKypr2W7W3Nze0TYNzhrCNKKUoa1r2rqiq1saV6MzkBKxbCxEuSOFTVd65sbohcMlLaWAq2uapl7ujXm3rJTA+CJ5DEIOJZU/utxD3/W9+Zsdp+9gKTiUemVB/qWHOklhzWUxLZyaMIwcd5L0vdB+pwhZ2jiStxVRSj6LOPViX/CaRkZUHpqkRa2hrMNWHZ2uqetKSJO2KghOKvKOE7S3qBaVs9i2pV6vUT5glcPHQIqlbQ5AIhlktVaZKVv6UOS3bzq3eY4DMUuI5EzcVqq8dkEAxQ3XvfJ7MxCtVMaoRGU1bVez9RvcMJV0XIPzFW1T09T1G4/j9fFL2ZUvB/XGdwope6xSZC021P3uij/9sz/kxfNPSaHH6ISuLdpZ8jSx2Z4xDkeub244Pz/n/OKCs/NzfvIHfyC7svWaP/nTP6XtOg79gXceP2a32y2FwrxbmWWDs5Lk8ePH6NsbctGd5yQugOfn57zzzjv8/Oc/p2ma5fEXF5fs7g+LSmRGHLque0WieNr7hYcKe94pzTu6lJKYOs27sJMd1fyac6++bdsFYp8nznmRu7y8xFrL7e2ttHuKM+HsB9J2LfeHW5IW6C14sZlxpsVQMex7tJ5omhp/vOOnP/mn+H4PfuSHv/dvY1cXokbRshOXe/Q0Ru07GGpuxqjXvw3kohsP2FrTuQY3wiefPGN3eyD4xDhN+Jh5+vRJSXFVbLZn7A9HfAigNT4m9ofIlJBY92kipar4RVhQmf54JMeJ9XqFW1pfE7v7vWQHKL20GryPYpZVN2htuLu7L3Hqif3+QEiJumlwxSJcKQjH0iqIQXT8xqCMJnnPfr9fdv0Prpmye59Jytvtlu26WRZ5bTRNYZRXdS1BT67GVhXj5Nnt94yDl+K0olxXvqB4d2JGZhzPn78oxkbCMdjt9wzDhE/QrdacbbclDTZinEh+M5IhlIrZlAoeayucEwMxjV7akX7yyz0xc0BmyfqTJ08WCe44jlxdXQl6570gFuPIz372nBAj69Watm2oKws5F4LbGq01H330EeMobRPfddzd3XF7c7MQrfvDsfBTPL70/mdkcG7nzPLT+V5erVY457i7v1/mkBQDRmu26035DGzxxDAiaVWQ/MTN9fXi9Fk3Ipm3O8PLlxM+elKSlE6t1TIHoM2yochKL7LqlObE2gmLwrgao0ERJVlTVWRlFpRR8VVXzn89R3k/ShWC+q+e5TTjsGpGiXLxzlBSqKXgCUNPovikROHhKC0mVVqLp05OojShOF6/ssHWEp6mlMZUDe36
jItH74BStE2DNZqmW5fN21cdRDEajEbXFfV6TbvdYn2isi3DODEmAyGgjUJrqCpD6zSKiK7X4NqijnkLuqOlTTKToU0xJtRZlFQzp3BxPj1FNgo0pMthOqtp65qpjWjriCi0gaEXdeQSIvgLxi+lRvlFyEbtFDHcQ7zH+4F/9Qf/lJ/+D/+M26svCeFIs6oJCQ7jkWbd0O93aK14/OQJm+2G/W7Pf/1P/hvOz87JOTOMI03bsj3bcnd7w09/+qcLgvHee++x3+8XXX4uhLMnT57wzjvvgNFchHM4sBAH9/u9SFezFB+nqoJZDntzc8PFxQWr1WrpMy9eGqUNM6Mc8/mYi425X933/bKzmr8HLMWGc47zc3Euvbu7W3Z+4zjSH3seXV7y9OlTrq6EPHdarIQQePz4MRcXF1y9fM6YBpJORC3GTdbUOFPLTsv3bNct9y+fY4zi/PwRt88+5h/9P/9Lrl5e8df/9t+lvXwXnxNRndqbnyYv/GrjgV6VH76jHopAlMIt0Hjg5vZG7KqritH3KAyXF4+om5aPP/6YYRip6ppxCkv7bCp/bxq5yWIKaA3n51suLs4xRmNthTUVFA7N7e0tV1cv8aMsPFUliFZGeCRzy6PrOsZR+DwoLW6nIRILWVEpxXq9XmLSjTFUdb3wA4bjuBQX8+j7fkG65rhxrRXDOKKVpW3FhdQ4x+Qn6q6jW63Q1jH5yP3+wH5/wI8BPwXGUcim3kuMfc6KGCXI8PbmltVqhbZWyMgZrKvZrldcPnpCu97QeyHpig2zqDN8DmIemBLjOJC0w9qGyY+orJlG4RtlBDnwheA8E7c3m82CBHZdRwiB6+trOaerFT//+GNeXl0thOz1es2jx5cEP4rkt6poqpoYEsfhjqurKz777HP6glzOCIJzjl7NhEuBgq01WOteKTZmqfF8/5+fn9O2Lc+ffyn3ZFXx+PEjQkw8e/aFGIrFQDYGZxtcZTFZMIaoRIJtrKFyFZfngsDUdc3+KBuheUc5b1K0dQu/rKoqycXoe0DeR4yhkOOFFAq5uFLGAn8LeS8vfKp/ncfcSjmVIv/qbypLb6CYFs6FB0Akp0iO8lWpjMZg5r5LksIu4lE5iR/K8YCfpA2oTgNdVHEHM46q6dicX/JklHZ7VdXknKibVQmflFbI6ZqZyGSrqbuW88ePCEbjg0JTMwweVa8JMSwbpa5xOANhPKLrFR5byoE3jFP0fT6vOYvKLCWsyct6FmKkOqEBLGt7jqissBqs1VgjxGebhWMyh2qK4OHXEMQ29/Dlkn+lW7TcGIpEmvbksMP3t/zRH/5Lfv7H/5Iw7iGPKBXxvsc1Hc1qxcvrG3744fc57Peg4PPPPwcEwkbBxeUl+8OedtUJ30KLZ8VM3uy6jqurKzabDefn5wvP4nv/P+r+9Nuy4zzvBH8Rsccz3TkHZCaQIACClCjRkl12S1VeZS/XsJZXf+lv/a3/yXb16nJ1l1UqW2WLsklRBAmAQCYSOd35nmmPMfSHN/a+J0FAAmSxy9pcyUzce84+++wh4o3nfYaHD7m8vOThw4c88nfhl7e9qa7rmE6n4wB0dHRElmW8evUKkEHg3v17TMoJdV3RNHWcDGXgEBOhCNX5W0DQexdXxo40TVBKM5/P2UQCmxQcYqSSpgnz+SzGki/H1e2AmpycnLC/tycEuPg7a+1IKBx8BpbLJVVdE3QQW+eQiP9DMok26D2pFjKPdz2J0XjbkBi4Pn/Jf/i3/4ZN0/EH//U/Z+/4rZi2KxCh5DIogYm/dsXx5uDwzSjIQAK7/a/I+BL3Phyp0UySFE2gqWu2mzW97ZjP9lFJxmbbYEzC5cUF280mVuTSrthuK3pvaNse76FIpGDKs5yynLC/f8B8PiOEnnJS4nopMjbbiuvrFcv1htlkhklTPGCD9OWrqqZuRHacbbaYRCbTEGAymUJsjw3FaBvdYIco8fOzC6yzkh5rUtIkFSKrlTyBQYp5cHBAmiaIXbIVxCURa+U0zzFpSj6ZkBdlVETAdltFonKIvBEhvQ6EQ+ccSZKN6qZBNTGdTinnM8rpjKKYYqKLZdM0dL1DK0ORZeSFcHgyk7GuJTVSm4I8svu7tkMrg4v3pLdulHC2bcuLFy9Gdc1wDHVdc319PQ6AIQRMJEtmWcbdu3dJtKhN+q4dZaXbbYXtHdV2y2q5pG2a0atDa83FxYXcVwEp2IyhbnvQohYhIipDQTgELLZty9nZGbOZDOzGGKptJchNVuB6S9s2JBGJGjCF4d4LPuBw+BDQpuPFixcywKeG6WRCZ3tQUNU1eZahtLRnxmdF+geSFqqGBUnkK3iHsx2J7yVYUjlc8JJWGvcwyDn/vm8DAXksDMZ/BVA+DhjfvggZskgGmYXSaixk5JMkJE0IvD3GIbJS5SD0gEPhohqrxw4GdW8e9Rhdb1Lh4BSTGSgdXaR7nGTbMlgH7JqCBT28NyMtS/Kuw1iNDgUqsbQxl0knYtxY5AmpDrRGo0yGF87411794RxKgSWFgw+DzFVccoMSftubC8AB2RjmNZnvxU3Gi8Kq63FKS2vJO4LXMl98i+27FRvBgQsoEnyQyVTjCKEjM57gKoLdovsV9c0pf/mT/4PVzQ3L89eUecrBwSHPvnzGZFJS5gVawYN7d5hOSpZXl1zdXPPf/PF/zU/+4i94/PYjzs/OWF5fMd+bkyYp88kdTk9fke14abx48YIhYrqK5j7ee37xi1/w4MEDvvziKduDCVCSJIYiSVFkVNsN2mge3H9L+uibNUWWsVqtMSZhPplLYWAlQKrvu6heybHeSmWnDEWRi/Oi1mLXDLFHrdnbmyGhXh15nrDZyGrnrbfeGnu3m82K1WopxMJIIF0s9ljMF1R1TVVVoBRN20AcnMupKFK2VUVAWNfGpqjOk6Zib+2cp2q2DA+s9Y4kzVFas9m0KGXRaUZne/7jv/uf2azP+Ed//M+4+/b7ZJN9WmdwZChlUMqiwm2Gym1bVRHiAxXiw+yVQ+nYZvIKMOhIGhbSUSAoH3u2jiTpMP2aaWKZawjrLdXNNXmSsf/ggPOLG7yGg8M91qsVfVPh+46ubimnKc6B0inohKYVSW1i5EFbr2qcNXivsLZnMklYVRXr1Yb1qmK9qbipLI036N5TBkWe5lgnKIFOMg6P59LnVxrnxYRrs97QO08I0t5RJpGCzkls+WKxQJkUkxoypairmtNXL0m0YTafsbe/DyrEOPsEbUTxMSSDFmUuEtfpFG0Mi8UeeTnBWVHb1FWLt548zVnWa1ZbIWW6TrhGtrPYrse2sRAIgYPDIw6Pj5gu9slnU9IsQ+s0wsFCBDOa6HTYo3yH9lC3js31is4FDo/3KJKcru5Ap3Sup2s7bCfuoQHobMum3pBkCfO9OSpR9G2PVx5lFBiYLWYMc4FCCgCtZ5IH0wnicHBwSN/22D5wdbWi3lZcnJ+zur6mrSv6riNRklhru56iKCkLIeo6G0i0ZNyomPUSEnEHTUxC1/dCRgVx020lxbbrOjyKm5sVadYwm8+juRLMypLJZMp6vYJ
oyqVj4ZDlGffu3UMnYlXfdjVplnN4sEcaJdAASZ5FImCCMlGFFbkl4k3kcUrReciDw/gW1a2E8Z9moDwu0hoSBUmQ9/znNx7+/78NMQAqclSIvAYVJHfMWwhJ9JJQDpmqvmXTSCl8HKS8kkySJMlI84KgNVZpeq3posHbIi9o60ZI4abARUPKxIA3pRCjUXJQw0fgRJGkFYm+ldn6oHBemhAuaHqncV7jMaKOGQ9RFs1pLmnPSZIJ9yPkYHry3mCdJ00T0iQjNQmpUQTbkacao0Utgx4KNSHYgrTkDJAqSA2kJmB0wGQGhZFWkVFY141IvFEaow2oiGbrIGMrGq0NOOFiyrzVEWw/Go3ZYPk2298uiC0YCfryXoh8JmDrFalquDz7kmcf/5SnH/8l2+1mlI9VtuPsdMPenig9Vqs1h4eHZCbhV7/4Kz788EPu37vL8y+f4W1PW1eURU7b1HR1jU96VFFwdHg4rhyttZRlOa7wBzfO4+NjVqvVaJZ0dnoK3CHLMt6++4i2bUfZ3NXVpSgNQqCuK/b39nj33XdHS+U8T7G2I03KUaqmgDwf4qENWou9MIRILJUci+12M5o4bbdbsizl4OBAHAL7npubm9HRsG2F4Pro0UOcc1xeXUYoXFz1RN2QMp/PR5RDtNJCRlVKMcnEsjo4i+06QujjgOZkdR31/V1nybME13fgOlJj+fLTn2P7mv/LP/vvefTej1ChJCtKCemxPcS6/E3+lnnj3hjrZCUET5RC+V30QzEmJwYZYLSxpK4jVR3KNvSbLc22YpKnXF3fUExKNuuK6+sryizn7PIS5QN5DBPbbLa0nWWyyCTcynYkiUiYr69XVNuWqmq4uLxEX/XUVYVWOVVlWW1bqsZjQ0IwKcrIw5/rEpOkcbUvMrMQwq1HRpZF6FBQhaZp2Ww2NE0TfVlqmqYTz4z9faaTGQ/vP8AoHYmR17RtLQ+0l4JjNp+x2F+wf7CPSVLSLBf0BEWSFQTE10PQOFF6La+vWW+3OK0xXrgheZ7jeicZHcqMpnHz/X0m0ykBqOoW3TtM0gvfIfZyffQLyRKFwdN1FW3tmBYlR5MF070DVJITOpGV2ogMSuaPx3m5JwEOjw45Pj6maRqaVsiwKFGViHuqZ70S3kSRFxJIGQfz4+MjvAtcX95grWe13LBZrSnSnMwYMpMQ0kDnpcVwsH9AnpWj9HxAq5MoKU7TlLppxgI/y7IxsXdUlxkh2w68jcGNeFC5zWYz5vM5bdvQ1CLJz/OcNBUjweVqyf7hPt//8ANevnrB5dUVWkNdV9y5c48sz1muV2w2m3iNpdedZhnaOXFFVyBZHvFpCZZgG1y3QScFpDkoMEpLiR+L99tn6+/hpobvIF4UkoG4wxVTPr7mO0xVCoIa1Dtxi3wsZRLSvKSYztE+kGgjyEYfUGmCyVPwniQxEHq8zumsKBn1ThtFzLx85HfIdxja21ZJho6NduB+RDjevEZaK9IkocgzyryUwDSbEFSgKDM8hrIoyZKSRKckeBplyfM0LqhiG/oN4YY0rI3WpIkmS+TvNIGQGGnFBSf3mX+zTB3o+7eWaOJKrYimc7bH9z2ul0V4sFbQtm9ZA37HYkOS7kDczJIkoHxHs7kgUx0X5y/4q5/+ORcvP8O2DW3TjBdgEk2qAObzOdPplE8//ZSDgwPu3L3D+fk5F7F3+/7771NV1QhJD5P8drtlOp2Ojp7z+RyQAUyQBctyuRyJadvtloCjSvfkmwa5IIPCwHtPWZZ0XcdqteLBgwcU+YQvvviCWUyDdc6N5mFVVY0+GoNV8nCRB6LnYEozsNwHxq8xhsPDQxaLBdfX16OxUV3XYuJVlhwfH+O9Z7PZROKdHdUNeR6dJPWt0ygwwsO7TPuBfDpe5CQZNf6DoZg2ms12g040WZnTNg1PPv2YoDKUKnj7/d+lbdegMkHVuGUpD7fmwIuXbaeV9o0yvGhsEwYJX4DBs0DBEIaklGK5XJIVQgLsO3HlXC2XAJTlBHSCQ1NfLmki+98kEjLmEXItvWG93lJXFd1UkxjJmHDO03U91bZiuVrjlbjq1UWK7W8dPncLuUFhkabpSDIE4j1qRtfQ4b9B3bbFlLQpjBKE8uBwD2MOMEmE0RNNOZkwmU3IixylDVleiIV35AK1rRTtdVWz3dZy/FUtBc6kZL6YkaiE89NT6rpGG8Pdk7tMZlPK6ZRiMiHLc3SWo7NCCoWdXq2kEfs3SGWtdWLbXkzJigLbW9pmTe+hbnqyvJAJu2lp2pqua0myhOPj41HyPXAkhkJtKAiMjsQypWIh0DOdlsxmM7z3rJZrvJf8okk55eLsjC+ffB75DVAWBVnkQyRJineMrRUAow0uhHHBQVxJrjZremvpbD9eT+89y9XNuHjZNeobPHIGe3mIuUTaxMlE7s22bbheXuODGPTt7+9zfHwcvU8s870Fx/kJtndkuSTPDmRwrSWYDyXJpy464Ar6IyoK5yzeOKnvY9qsC2EkdP/928JYJwUYjcoG+XoIt22Av12zaKcQCwMJ1ZClJZPJnGnjSL0nMQbd9bjeY9KEZFKAUuR5igou5kkJYqF27MpRtx5EIpWP6cE2GsKhxmdKJvivfAMFSotvTlmUdH3A+4RAQedaXOiwvifNC/K0IDM5yltcn5MmYjX+TZvzjjH2PTC2/quqAQI6WHwaPX+cJYQsElgHW2kVjy+6iQYfF6k9dV0JATua5YXg3vDq+Ou271RseBUhV3pU6HC2RrsG+iX/6T/9B85ffkm9WrJZ3qBCN7YFyrLk4OCA09NTTk5OSNOUjz/+mMePH5NlGcur65Eg9uGHH/L5559zdnY2mnYNCozFYjG6iw6T79XVFTc3N2MfdiBcGmPY29vjZnmNWzlIZJB4/fo1s9mMd999lyzLeP78OUqpKJetcDaMhYqNUr4x8XU6Hcl/u1Vhnot9cpqmYyFwKytinKCstZyfn4+DYlVV436HsKqzszMxKErSNxj0o/9GnACHSch7z2QyGY9pGBSHScQYM4ZjDROnSQ29c6Dlezd1TeI9Sab4+V/8B5q645/9D44Hb79PWsxpQzISR9/oqcabbKzZI+tbDePIOJgMt/FQaESTouAwvscoi8bibYdzPcYo0jxjvV4LCQtYr5fi0Ne2OBdIixKTFgQCeeyphyBpst4Hus5SJCKLfXV6hk46ylxgd0XGthLEo2k7siIlzwsIiq6zJMkgmcxiD10Qg6GP3DTd+BAO9+NQnA7o0TApem9RqSHPElIjXAUbpAWTpJrF3h5JmlCUOUmWitGOyUjTEhN9NwZeSN9Z6rqh7+1ILjXGcHx8TNu1XF6fU29qjo4OODk6Ic8KOe9JQpImJFmCyVK8UmRRojrkdHS9cGeGn6molsnzAp3loFUseix11+ODiQjliuvLS7xzTKdTFnv7zKazkXgLYoTXdw6lYLvdUm2b2DqRwEYhUsvzPZtOOD19zWq9wqiUpqnZrCvOz89Hl125z2WiTiJit64kG2lYoExnU7QRRVnbdbfy1yQhKwqotjFp148qtsGMby
gwBhnvbmEvqpJcOBgRkRycImUR1JNlGXfu3GGxWHB4eEjXWW5WSwKKLM3pe8tkOoG4AiUqoHw0ptM+xARhqSuSBIL2SMPEjx4Pt/PX379qY+AbjAyUWGA563FuyAXRcYj5Lo2inUl94CzEEUqrhKKYsrc4wpPTB0hMQhos2+kGnRrSSYlJE8oyJ3iLdj1JUaKSDJXe+kmEWByKl4WQydumptnWEe3WtFW6E8Y2jIKyGTVcW02R5zStw/kEnZToXlE1Kb53ccEi3CBDGH0ztPlrIiSEEDTyokZxg5dfOtfT40YX0uCjrPqrKtNYcEDA2p6mqVkul3RdL8Wvs2Ka9i3lyn+L1NeA8g3Bb1FuzdXZS37+53/G6y++QHtJ75ykKWlRsNlsODo6om1bXr9+zf7+Ps+ePSNNUxkg25bz83OCdbzzzjsopfjyyy+5ublBKcXJyQlJknDv3j0uLy9Zr9fM5/PR/+Ls7IyiKNjfl4TMYfB49EhaJTc3N4TgR8dOrTXT6ZTlcsnbb78tEeVRBTBYl5+enrJYLJhMJiRJQtu2nJ6ejj4Lg0fAQO4bCpFBSbKbj1FVFfP5nLt3745BcVVV/Ybu/+DgYJQEDpLIum5IknQ0exoY/rsTGjAGew2rr+HnwzYoZ4YCKISBIiVmRlmasF6LEqacOJRT/Pqjn6G855/9d/8DD9/9AF0c4ri1do7xSbFdIux/oUHF1UQY7hW18zwMOvPYmx2KDTxpsGBb2npD3zYYLxO8SRJCUEwnBWWWUq03qAC9CzgUnZVYeRPNs4R4q4dPAyWy1m1V0zQlqcnRKsH2nt46mq6n7XpMlqC0kbRYf8udESmiZUjmHQrKAWYflCpiECbs9cE3JYtpiMvlDdfOkhnNneMjFntzEpMxmRTSElNRum2EtCWpo3J9e+vwXlbnm82W66sbiqKMCccN1jomM3HY3GzWpMZw//E7zKfyjPgQWFc1SqvoPJpGkzBxXR2KCr9zL2VFjlKarm0jOU64NzoJ2KBGkvJkWtJ2/eh1sbeYc+fkBJMKWjcgdn3fjwGG1louL6/GCf3s7IKqqimLgtlckE8ZgEXS1/c9FxfPqbb1aOl+i9K5iNKp6LNTj6hGCEEWH0n6BuI3DLpFbK8MCJVSivpqOWa07OYg7dqRD4ZlYWyX5m/4Z8wWMynqUs1mvaZtW+7evUtZFtwslygtRazzokYry8lYsN+2DrQ44gYvkldnJAdDB4Jy9GEI99KS1Kz+PpYayDChGOW7g4OIiwusECIP7G8luQk77ZlhTBL+WJYWLOYHmHRGMNIC6+sNJpGCOpmUJFkS3WAl08skGcTnctgGX5BBPmrU0IQQ6azRAaU8YhAWkY3drk68bloNqb/QO4ftOvq+w1qZ0LXSONUTrBf0lzA6gn79qZH5IfFeAgJhfAbrpoPg8X1DamB5c01Tb5mWop4ZwvCG/e7ycq2zkibdVPR9VEYNkuBviTt9t2IjBFToIFR4u2J7/YInn/yMen1KvT4nUwl5MSF4S9uIFfirV6/GFcPz588lctv7URp37949vvf4MZcXl1xdXb1huJWmKZeXl9R1zfe///0xwGmwC5/NZhhjuHv3Lp988gmPHj2iLEUaObRhDo/2BRINt3HeWZbx8ccfc3R0NAa4LRYLjo6O6TvLkMY5pMcOq7D1es10OuXg4IDVajXCzVmWcXNzIzdRhF/ruqYsS/b29qjrmsvLy1vTss2GsizZ39+XAKqqEug7tkCA2C++7UMD4yA9DOIDajEcLzAWQLv2zIMaYCCjheDIy5zgHE1dkRi5KevVinwyJU8VX/76I/409PzX/+y/48EP/yt0OjCqTSR4yn9rFWI8/ZstlKEYeeNGjK0SKTQ8OjiM7zD0mGDBWVKjKLOScjrFB8X5+RV9ZyXUz1qSNGO2N6XrLedXN3gnpNmm64S4awzWBZR3dL2lsxZrvcjJ8GRJwmpZs1rLBN60PeVsElGMlr4Vu+3B+2QwkBuUJsP5bdueLkbR971IpieTgslkEmWOokiaTCbMZxOO9hdkkVOUFnlEFIQVb4wmEGIBqzGJwntHb2Xg7bqOvrOUpfi9XF1d0XUde3v7ZFnGertmMZ9zfHRMnqQEF8ZVd9+1lLOUrMwxaYJKDIQEG1t8Q2EVQiDNBo6CGI9t6ob5IiMER1VvqVqLVwlZVtI0NW1nybKUxfwu+4s5k3LCtm7ekNDtpqx2XReNyxLOzs54/fqMosiZz2SVX1WVJMn2PW3TsN3U8pqsZG+xh+9bNpt1RPFu2ybeS1E2SMzFgfVWoj4UA+IsKm6fxKJ9OK7hWR7QjIFP1ff96Bw8IBs65rOIGkbuh8Exdb1akRfiQzJEG9h4HUHOQ5Zn0dq6wY9SemkDmiwRAqp3uL7Da6CvSUIvPjqhxwYNiKGa+S+81PhGZ1OlRgTUK+E5Ds1VP87LalTE/efLfBVKaZIkp8gnOOxYbCQq0PUdXombrskS0qLE9S3OJNhAzKXRb+wv7jX+TwoKyc2JpNHguYWg3lSzDJktapjcg8fbgPNRjNC19H2DGLAEDIlUKF7M3r4pGyWqXAHh+lnbRyuFis2miuN1j85E+Te6YEf8WSzQd5gbEVkKg4rOiSGC0ZrBzuDbGiN8RzWKxbcbfH/Nk09+yic//3Pevn+E77YyYWho6jX7h8dsWuknD2RNa62w9ONdM51O+dGPfsTl5SWvXr7i7OyMR48ecXNzM2aXXEeTr729PbbbLa9evSJNUx4/fsz+/j7r9ZqnT5/y7NkzHj9+zJdffsnV1RVZljGbzeIKsObOvTtwCl3fse23HB0dUVUVl5eXpGnKdDrlnXfe4eLikrfeeovtdsvl5SUhBG5ublgsFiOqMqyOlFLs7++PduOTyWS0ZQZ4/PgxAOfn56N8dXfSOjw8pCxLVtFMaGjBDEZEQ/9/6PsPbZuB/zGdTkcDpd0o+2GgHwbNXWvswVhGGyPSLGtH1sUQ7pNqhWsrkjTn6a8/4mZ5zX9rNe98//eY7x/QOYsfzYVib/WNini3h6JueywqPpRBgQ8EZwm2JaGnSB2hrvF9w958ysn+grptOT275Ga5pEhzur7j6vqa6XSG847JdMb2+SuaridNM2FgG4NtW7TSJEmKtQ7vFev1lpuJRlFyub1GkeKDwjofi0tpI7TLG6alyEuLvOTo8JiiKEZC8WazwfYOZyUKvSympFkqXIUkicodxXw+o5wUMRo+ZVoWKO+YTKUYQWvJdFGK9VpsxqezGblJo9JKEYIUxsubFdY6FosFSQKnp6c4F9jfP6DrejbrDUmeMJtMwDk659Da0G072r6nKAuSLJGVEh6DFuVGlo0mckZr0jQjMYOHixsdTkPwowV627R4LQ6iPmiM0cxnMxaLOUapGKLYjQTQPC+YTKT1OLQ7Z7M5VVWz3VZMJhOyNGG72dJ1wtuothvquholtHfu3KHMS26urthuq5G4OSAsYrwnBkbDZowh4Eeb8rIoaGN7pYvI4/B8Da3HgBkXCO+//z4//elPcc6xWq04ODigaZqRc6a1wroQC
xgp+lOVxWdXWjcnd+7w8OHDuECSOIT5fCFOs0H8PZQSRZksBKKUVamYEurxtoHUQ7fFN2sIKSrNMCohqIRvCBn/e7OFnX+4uGAR5EbaRNY5nBPFR/gOCI5cByU7ZZzrhX8UvVv6oMGIMEDM1VLKoqCczcCIwkgXOV1q0CZFaSXIZ9wG8z+TJKSpTKOJFnTX9x2NtWQq0Lc1Wh8JSrGz8FJIGFuiJK/EaA30eNvRbFZ0TYW1HbZtcCanzCe03pEZR7VZ0jYbgpf5KDUS83Er75WxV/iOZiS2yiIPeb1SNHU9erhIvROkXRpVQQS5FwVRNCSJ2MkrrUlMQp6mpInkCX2b7TsVGya09NuWT37xH/hPf/6n7JWai1ctfdMwm03w1nN4fMzF1TUmy95obQzkyaZp+OCDD0Zi5suXLynzgn/+z/85H3300UiuvH9f7MovLy/H1Vye57z77rvcuXOHm5sbXr58ObY1hn8PhUoIQRxEjRqJnFmasTfZ44svvuDhw4dje+L8/Jzr62sePHjIxfmXAOMAk6bpOAglScJyucRaO7p6hhBYLpfj6ma73fLee+9RFMXorii95WREUQbp6+npKUVcAQ19YjGlktVl39+2RIZV4i6UX1XVuO9hxT3AxsCIkozx1REW1kkSw45iDoEXaC5NEry19LFwsd5xff6Kf/M//z/5p87xw9/7MSafoXQGicH5rzC+kThnFYhS2NsBJeBj31HU51oFjAoURtGsrnHrM9453uOtg4Kbq3NWqy2Xl1copUmzjO1ajLOKchKVEIPzHRINHh9lZRSSemToesv19ZLDwwk+wGZb4ZxnWoqdfV3H6PegxBY+Tk5Dm0uMwroRhRtaBpJp04nxlZGHcFJOyDJpj5RlQVFmFEXObDZlPp2KTMzZ0Tkyz8WsKzFJJBI7UBrbezbbmvW6wnmRMmstyNVqtSFNc+bzIhJFxWF2bz5HK9hWG2xnx4whGyApMkmedD1lkUUCqiaJ37VtW8rplLIo8UGkoFXVCNKWZhhtqNqGm9WGqrXkkwW5gtl0ynS+oMgL2mZIYW1xXuG9eL60bTc+Y23bkWU5SZJGFG+LUgYfPF3TEEiZlPlofLZer+n7niwrYnvQ0TT1+BxsNhuqqopFkcH2fgw4CyGQRJ7Uzc3NmKUynU5Zrld450VqGtE+AB88BwcHopyrKo6Pj3n58iVDSu1gcb6bZST8EMWdO3fwBOq2lt52W1Ntt6Nt/HCt+74nzzzlVNCvspwwn824Wa5EmZCl2N6KhDHxJEpRGE+ROBQd+BbnOzAZnnSE8v/+blH9EIZ/Kwi76dO3eP53/5Zvcg/QEl+QZYY0M+i+BwNJkjKfZqLuiLHsJJqizMDLgsjoW5OsNz9hZ00fEM5N3xGc8CH6VFqUKnLZdvsowVtBQWLmjrU9TbWlaxxNtcY1lfC9lBI+VW/BOZxx1GlHX1eCcPhk5F1IAGbAeUGZpSUvKhwCKOeFPNpZVCpRCMG5EZUWiDrEgjd6SBtI85T5fMrhwQFGp0K21oY8TcnT5LdTbDSbK55/8pzP/uovcdstq41loyxHh/tY32PSjMb1dM5SqpyXL14wm804OTnh9PSU4+NjHjx4QFmWfPTRR6Rpyh/8wR9w9vo1f/mXfzmS6j744AOWyyUvXrwY+6Tf//73mUwmpGnKn/3Zn8nFjpDpH/3RH/Hs2TNBMe7c4Xvf+x6fffYZVVVxs7ximz0GhKVrreXevXt0Xccnn3zCD37wA8qy5PT0VFjjsz329vbw3vPll1+OfA+RvYnE8dGjR2PM9WAmNhg7/fCHP8QYw+Xl5egOOfApBvWMjsZFXRwELy4u8N5zfHw8KmW6rn8D1RgQj69yMIbzAIw950FFMxDbhteNcfdx5aQUKC/JimkqHIMuJqBq20OiUcqzvnjB//G//r8IfcU/+Mf/DVprmtahk4KAilr2SGhDEmM9t5KsoAbL4KEvKGIwo8B3FXZ9w739OUf7JTfXZzR1jXWSlbJ/cEy1WnNxccHdO3dIs4K2c9RNh/PiM1CkKbaXlkQIiuCFqW+CqGhs9L/QWqM8rFZblss16+0WtKKqa7ZVzsm0IEtNlCf76Ayqo/NoEkmiRGRLoHylPGkqBFwhOhryXNCOvpc/1XZNmWZkqZHCprdsNkJoVtoIAcx38fi3bGshCM+ms5FsutlUkQgsaFWSpBwfn6A0ZGmK1kikemwVhgCzPUESldaoRP5oY9BK0e0QjYW7ofCdIAUD/yHRolja1A113ZLmE2azKZPZlKIoSRNhtK9XK2zXk2cZLhiurq5joaw4P7+Q9khsGwwqlaqqpNBOE6azGdNpgTG3duZt0zOfLiiKgs1qQ9e1O0TNbrQ/z7J85CINrRApvN3Iw0rSlNVqJbybPAelaPtufG6890yms1E9cnp6OvI5hlakc47lciloULRnF6l9zuvXr9k72Mc5x7ScUk4Kur4fi5U8LykmJWYgaUfllLWWNMuYTidizqY12kjRkZuePAnk2qH7LegUrQq0LvEqR5s0TsrmawuOb2xf/BeyCaIgdYCPaOigOhmQjeHPYDT17ZEN9YYX0GBwpZTHJEroF9qhTEJRGhIUk7LAR/I8RongZ2hvODGtI3nTKVP4GsOnyOu6tiFYaXV0CfR9R/Di9fnGwixE7yElRn59Jy3cZttg6w2uq6IUXOEt9GEL1tKoHu1SNsu7uO4uPhUBg9d+NHkL0WhyuC+sk2DPpq7xtgPv8Lmhbxuc63cKoV0FjyzklBLUfTIthW+WpBBEiZioSPD9bRBET58/4aX/jO+9dZefvX6O7XpMoljeVDLLOMvl+pxJXvLy1Uv29vZGxcd8Ph9RiF/+8pe8/fbbzOdzqqri/Ox8XE0cHR2xWq3Gdsg777zDZDLh7t27fPrpp7x8+XIkfP3xH/8xfd/z61//mnsxrto5x0cffTQiHAcHhyRdCr1Mtvv7+zx58oQkSTg6OqLve7788kveeecd+l6kb7IayXnw4AFffPHFqELZbre8++67I8TsvR/VJdPplKIoRuntgFIMhcL+/v5YdJyfn4/tkPV6DQiSMrRV5GdRPhUH2KGQ2LU+HpCVIdFyKDQGeHgoOgZFixAZHX3fCg8tMMq2EiNGQ661oDRV04rnRK4xruH6xef82f/aUBQlv/sP/4hUy4p5cDTcDWZUYUehMmyKaB88SGQ9Cke1uebt4wNOFinnp09YXbxib09cHo+OT+h6z2a74fDggP2DA05PLzFJQr1e4pys/JM0xbdNbJsEAg60xgZBbNq25+rqhqPDGbbrWa9aut7jbMAFkZXa3rFcrckSQ5YJ2pQk2ajkERWQJc8lm8SYBGtjlkUiq2mTCFqg9MAHUCRpMkL2qZmitKbva/rOorQUG5K9IgZpg5RzOp1SNw3X1zcRHRPXSmsdfeci4dHF1N9G9P5aY3TMZkhTyW1JDGmeoNMMVKC3PVmSso6oTT6kJa83NE1L00ksvfOe0PeopkFpxf7ePuV8j7SYEpRYgXd9S103keluCT7QOz0W5V8teHcj5wfipQ+e
3Mi/66ahqrY8ePAAhabIJmw2FTdXN5FrFKL9u6hyBLFrIuKXSz5KXcvzpxRKCwF8Mp2OShaVmCgNDGNA1YBADk69y+WSvb09Tk5OpJUVowSGsL2yLMmzPD7bws+y3jGZTehtj206ZlpzeHgYFy6wWq8JCvIsg0gwreuaru+ZzeaC3HQdaW5QKpAoT5kqJmnAaEfvG5yt0GaK0R1O56CSKJlld9H8X/w2op2D3pWdTNsg7AZxyAzj9LcTJP03bLsnYyBxSusryTRpqtDGE+ikDddaEm9QIWB0cttWjqZVeB+RCX8rJx0+RkXuBbL4a9qaarMmOAfekapA34tSSdzKdtooQyHQ91TVlu1mQ71dU28raaH0Fc5LOzJ4hVEJ9D29q1l7TVut8b4f5eu7+1U6tqsRtNv2gqh2bQPOoXXAe3Ury925MkFFyq4Ss8ghJ2bkmChGKa/zggj+hqz3G7bvVGxcnb8kOWjYmx7x9v2HPHv2jLZqcA5Mbji+d0xzdc62bdjf2x9XFnt7e5RlyatXr+i6jnfffZfZbBYzLhpc1/Pee+/x+eefjy2PxWLB/v4+P/jBDzg7O+Pf/tt/O1p1z+czFos9vvzyS1ar1Ug4G9CCt956iydPnrBYLPDBcvfOHXghA97Z2RllDLbqum5UowxWrmdnZ+zt7Y39YGGcGzabDWdnZzx+/HgMb7u+vh4H08ViwXw+5+zsLMK/2RgW9+DBA6bTKc+fP49Jmsl4Y4graTFyNgaPEFBY699ImoVbstvuNhQao9nXYK8bORyDFHjw/QjBo7wjBI3RqTjHoSBo6R0nCbZrwDoxZOoappMpp19+zv/+J/8f0nLOow9+hIrtmvFWVbf33UAdGh/74TuE28oZHyizjGBvePn8JWm3YTETl8Y8L0jSkrPzSzFUKktWyxV12zKbLdhWFShNWU5Ba4H+/O6IJIhG34uxjnVebM3bjrZ1OG/iwBapaFpcPPM8xzk3cmxMVC4MBcNwvYUca8iyfHwIJVsliVJcJRKzHrx1pMrQ1pHc60IsigA0s5miNQ4fRBESCGOh0TRNVFuVEq/etJHoXI0hZ9DS1FtskGCkLBcVU5rnJHmONgnjQSpG6axzjknkBw0cCJCiXOkYIOccaVEymc+YzOdgUjoH6/VanDCVeLz0tqdua6rmtrhYLpfj74c/A1IzPMt5nlJmmTwXpiQ5uRMJc5oQT/UQVjbc596HkZAp+7HjMzV4e3hgOhM/n3ynVdlXUgyaxIzQeJqm2OjTMZBGV6sVi8WCk5MTnj9/PrYvh/OWmCRmMQnB1w8EX+QaOmvHseno6AQXPNuIfqIVrq5RStPH0MckkcBA7zwm9VK8WXCdx4ZASAE9RfkegpX+u7pFB3Yn4tuxf3ctrfjKy/5P3UJQvNmCFUMq+d3wIuEuCbLx3fc/YCHDRJkYUX2AuBx776hrS65TEpXgg6S/qujoa3TAu15I7SJ3e+N42RmXg/fYrhdkw0mL2vaJmD0Gv8NjG94uZZS1ls16LQKJyytsVWO7RorL0EvLz4JCQ29RvsG1UG9WBOd3TsuuOkaOXx55QeeSmLcj/CwlsRVuSKkO4/HsHuVAEFUaQvDRZ6cV8z8fUN6L18Bvo9jo24bnz55yvHfAbDrl+OiIs4sL5osZQQdevTjl4OSAblvTbiuKvGBvsYiQ9JI7d+7KairL+eUvf8l8Pud3f+d32axWfPHFF2RpirU9aZrye7/3e+R5xl/85Ce8Pn0djbsU/9U/+q948eoFXzx9SpaL4dTB/iGXl5ccHh7StjVXV5e89dY9tNbcLG/G9kyWZZzsH/P06VMSY/jR7/1oXDkmiaHIc5wNXF5esLe3x8OHj5jPZvzio18QQuDtt98meM/55QVpIh4A89mc9977Hl3XcXp6GtM6JVVSa83x0RF5nnMZWyXDcewWKYO518DpGHgBxrwZ4rZLBB0KB631CPkOK7RhG9CPYUUmvwtopeN+RPqotIkojDhAplomTJMYXG9JTcp225BkJc8//5T/7V//K/55onnnvd8RK1+T4zyQpNIS0APLXEmlrCOiETwGRMrnPcpZjLcE3zCb5KRFie8rDo8OCaR8+eUrEjRZUXJxccnr09fsH91FJ4agiIZkKa0NaCNyTgKSjhnELbBxsuqdTScE16CUkSyQ3tFZhzHxOfOBrMwwJqVuWxSKopySZWns49dUtUD3s9kcozVZmlLkOW3bsN6sWd0suXNyjFZGwp6sxwZL3/Ws256+7TGJoZzMIvlSMZnOSMtCJLZeciCapmN7fUPdtFGF41mvL6mrmjRJR56PVopJmbGpGjyCpuSTCWVRUk4EgUgzgYedDZGQllDvtCSGIKWu72RV7sT4TGmNVinT+QydCqnOOktV1bhgSPMJIpHtWa221FsxvNtsY+siMUynE5bL1SgT3m43VFUd24dSqBoj6pwkybC2wzkhhgYXcNazWt5wfXnB9eUVLoaWXd8sIcDBwT5pmlO3HduqYlVt2azXOKCcTDg6OmC9qXh1+pqul5ZR13djG6jtop9GUbK9vqHpmpEkK23Rnul0QpYJAuLiwLxYzLk4vyCEwIcffoBznsvrSzaVmPElacKdO8fcuXsH55wo3w6PSE1CVTWiYCtyeivXYL1aM53NSdOM1lkSm0AivJppkaKVp/PQ+o7gW6DHKodTw6p7SJ+OKIAioo7EyeNWnv5bR0C+bv9f+dnA5lKx/bpbBgWlRoSDmMqhIqIwrODHCf4rk9zwc4UaP1POiUy8Ho91HXghwmsd3Vr7ns51dNbhlSLJMkyi0ARs00ZzSksfdkIqw6DHUxgCBiX7VOB1bJEksuDxYWTJj+8f4t5D5ARdX19ydvqK0FYEb/HBjpN4CAhSYR0qdKRGrMz9zvkYiws8qEHtB3mSMCtKFtM56tCTJomgO8pRTmdonRAwEOSbKC+m58MxD5qT4AOu72nbmqaRuATbi6jgq4vfb9q+U7FRVw3rassnn37K/v4BSZ5QTvPoHtiRaIXdtKyulxRpwr07d+IEnHBydMzRwQHPnz/n2dOnvPf43VEV8urlc2zfsn98zHwusc6ffPzLMb01TQwffv8DTk5O+PnPf85yeUOeGRSesigp8pSyyMmzhPe+9y5KKW5urlktV7R1hUv6eBNCWeScHB9xdXXF65cv+eKLL7hz5w5GKS7OzyiKCQf7e6zXa1bLa+pqw2w6oW1qPvzwQz766CMInsmkIFslpKkheMflxXmsIDVtY8lSMRUzxkSFTTPeZAPxdEjE3G4lM+XNguE2rGpYdQ4P11dbK3DrJDr4CQx+HGmavtFWub3RJTbc+YB13diXTlMtznlGvldvPX1S4HzCLC0hOJ5//kv+t/+343/8v8K9R+9jg0epAh8MLnhSE7MJFAStUcpBbKGY4DAhQPCY4Em8JdOe4Dps6DGpSEKXlxdM84KymPHi5QuarsUrRdN2XN4sme0tuLi+HjMlSBQ6SWkbgaI7ByFY6q4RC20bKNKCxnagPTY40IYiT8nTlEwbmfCCuE/2vaXfbEekwTuPNhI
Z3luHSRSzcsJ0WtIVOQf7e6SJoSwLfHC0XUfXWpq2id4bmawygqJdbsmLjL39fenlJ4bOdljfcbWqqOqetnNjHsvF5VIce42mLHP6voMQOD45wXmLUprJdC5OlIkhJCleGZIslWMOSK+ahK5pJdjMdRRFCRAJsJIpNKgspjG9lMhFSfICHTxVXWGSEutr2mZN0/QQDKicrpOWxna7Ge/DpqnGQnm73YwFdZIY8jIjzTPamLY6m85x1tG3jrrZ0rUt29WSpt6QZhrXw/X1DavVmqOTEzyabdPQRXLtdDrFpDmXl5corZjMJpxdnLHeLClKSeMtTUlRlOR5MaoSJ9MZTiO+PFqs4D0enWguLs8kktsoeu+wfcdqeUNvOxbZnG214frmhtV6hXWO/YMD8qzg5avXTKYzIQ0XJZcXF8zmcxQK1/eoPMf1FmVEMdA1LRolhWgi6gcXFOiMBCdukr5Fqw5Fj6UjqBS8wkTehjQQxPpLa1FZuIguer6hvfkdt2/kgoz7fnPvX8e0GPag5QVj8SFIgcH6gPegVcxVkh4afE2R8XV8lVHOGQayozRktNYk2qAIuF4CFY1JIwlcJtGgJH1aJwqcI3jL1VWO9e+OXkMQWwk+kAAJSlyQjUErJBHaO7LeUDctHi1x9non3iGEOIkL18P2HXW1IqUl0Tq2KpS0txViEa4s4LFe0fXQWzlP3nu8chithHiKQuNJtWZelhzvHdAe9cxnRyRGYuoDlsniEJXmomwKCZoEPfi9RJM1b8HbQN82bNZLVqsbzi+ucD7QRdOxr3o7fdP2nYoNYYS3rNZrLqJsNE1TbCsqieVySVM3ZGmCc55nz54JmnByQtM0/PSnP0UpYW8rpTg9PeXJkyfkmfAn7t+/T5IkfPrpp2MqYwiBf/kv/yX//t//e375y1+SJGZEP/7wD/+QO3fu8Cd/8id8//sfMJlM+Pjjj0mShM1mww9/+EM+/vhjcYeMN6HkGaQ451iv1zx69IjJZBIlrntstxVXV1e8evUq9u3lFJVlyX/8j/+Rhw8fjr4YgwX469evmU6nWGu5ubnBGMO7777LdrtltVpxcnLC2dkZdbMeiWwnJycYYzg7O2OwTR4KDSkUUkJgRDN2yaIDnDww74do+11r9IFIOjDpgbG9Mvx+96EdipkBRh4IqEqLqiMERdPUJLkoVp5+9gn/2//6v/Df/8sZi+OHpHlBazuyrMA7Kzes2h0UorNoZG3rmGvhrcWqlm1zQ5k7FtOCm8srynLKzdWGi/MvcV48VdaVTIjr9ZqTe/eYzWbcrM5wKPquAzTOB4z3oov3ImkNxIEAT55n1L0gFyaJMfB5TlFkGJPgvJVBKCJMzY6vSVEUTMoJwXuatmGzucGYgLVuzLYZ/C2kH99RVxXbqiIvxLNDqQ7rPWku/fxyOhHEoK6jc6Kstqz1AtTE/mhqNE5B07UYpZnNRXZbr8T2PitLEpPiEWOwLM9Jc7E+Z1AXWStpsesKjIF8sFQPo+lW3VRobchSaR+aNJHJL8Kzk7Kk6eOFVGKkFpwleFDajA63QxtyaEcNHKosy0ZS9XwxJ8tziZafTFgtlyxXa05fvgTvcRFxPDjY5/rqGoCLi0vu3r3LnTt3eP36dLT0T5NcVDdBvGSyPI8kaBVl0JY0zUmMZr6Y4z3YzRalFZtqy8mdE9q2EfOwSDy3k5J2VcccHjWqT3zwTCYlBwf73Lt3j9VqRVVVlJMJ2+2Whw8f0p2dsVqtOD4+Jk3TsbViolGfyLMTXJAVYvDRtTaIgZfwWaDre4Lv6LoepTJMcCjXEroGlRcoIlF0WEiMnIXdSXiAx789yfK3uX3zEcptFULY4X/97cqjN2kskfvhZazsuo7tZkPTdtK6iWha03bSih1Yn8FDsOzNctq6wRXJVz4j7LQQBofdlqYTG3CtA20XHZ1hvEaAKNm8Q+udjBMCfeQ1KTTBSWsueDs6dkr4WkZnvSh73zg10goRxFrGt7Is2d/bp+0DvVNx4dTiXEsZCecjiZad1pPs6da2v2lZLm+4ubnm8uqSrnPUnSzifyvFxsnJCWotK/EhOnogPQ7yz729PTHm6VpmMyFNPn36FIDDw8PRtfOjjz5iOp2yv7+Psx0ffvghP/vZz0a3wcPDQ959911OT0/5yU9+wna75erqit/93d+hrivyPOfP//zPR4XAarWibVuOj495/Pgxl5eXfPnll6P8B4TF/vr1ax49esTdu3d5/vw5p6envPPOO6NvR9PIpHHnzh2urq744IMPODs7YzKZcHBwIDH3cSKX9ktCWZbR7fCC6XTKo0ePaBoZuC4uLiIRdDPKUgeirDichtFtdPDvKIoiOkd2o5x1qNaHQmEgqO6qVYZCZJC+DgjI8N4haXCQI+7+fvicAU0ZNu8c1voYPtYRgiZNDY1t+cXPf4r1iv/b//3/QaoMWTohuHa8ZcdnIbKaB3hzsO4doqW32y2LWcn+Xspmeclmu+H11RlHh3eZlBOarqFuK0KA+WzO6/NzgNHQLMnEh0MImrHS1sJhUUivNk1TEiyJNkAjK32VkBgjltlVRdsMZmo5thdUQlaMkqfi40opzTKmiyllZiS5NUtJ0uh9ohTLVUtVSzR9kubs7ecsl0spOvKcw6Mj7tw54eDoSFQOqxXrmAjsgshG5U8vnACtaQkURU6eZySpkFPr6O9CosVCWHv2DvYpywlaG9I8Z7OtWEYlhrOeurMivS1KacVELoTWGmc9bduNmSx934FO8Bg26y0md6hUQuiatsI7Rd93uD5EH456LAbbtsVaGyW7K7quY39/f8xI2tvbI82ycbCs65rXr19zfn7OYrFgeX0thV7fsVwuxcistuPiYCiynZOFTpGX1E2U7MasmqbtSLOc6Syw3VagFav1hqwo0dqQpIJUXVxestgXa/HLyyvwsgrWSmODDLRaaYHGEVv8g4N9Xr9+zc3NzaiSc17ukdVqRTkp2Ww2sljxYeT92EgSHBYVGh3VWjKO6q6Lbq+GJBGFkUYJf8D3uK5C6Qmp6elsjdcZQcd2hNp1coguukHg9REd+C+i3Pjm7Q1PoLFN8HfT/fEx46NpGjbrDevtVop6GwPUnCPaAIn3hALlLfVmH9s1hKz45n17h42GdFIsxJ8PZorhN029UCoCNirakKe4YPFK+GC9tXRdHE9VTArWAUxGGHl2v3ksu+dL7PlzyskU1Vl0kpCYCV23xXnhho1FUDwmtXPeBx6IjXL0Fy9e8OLVKW1naTsrDsS/jTbKfD7n4cOHnJ6ejqvxQX4qE6QfGeeLxRyt9Rj5XhQFR0dHPHnyhFevXo0n+K233uLli+f8+Z//ObOZSP2GIKftdsv19fWoDtnb22M2m1OWBc+fP2fIL7i8vOTy8pIf//jHKKVGGezjx4/FVOfpNfCQyaTkH//jf4z3nidPnoz5BYNboDGGBw8esFqtmM1mLJdLfv3rX3NycsLl5SV37tzh+vp6TLMcIu0H4tiQcTKZTMYCJYQwKlpm8/not2GtuE7WdT1KAgeVjZxD3iCH7uakAG
QmaszEptwd3NLderltJ4bAxMPjIFzeTh7GSFBqbbO4iBaRoZx4EY4e31Dd57yqLg9m6D0XKO1sslU/Dc39/hpoH1co3WjneePeHN7ZanTx7z8tWbOcjuZNWiV2KI0zPOvBfiIepeKYmpV0qkqcBsxoQShMgYiauepgmlJZAtd1LVBFUpkrj9fo+2hiJWjJOj6wfhTRpFUYBRikXTctossEE4MMEHfAAXRxSOmCy/s1xRxTwGN2gr0e6vRkGblm1LoxQ+bAApBsZxStduZOgPHhl9Ly644zAwKc1utye4yHe//0P+8//s/8DziyectksqbdFRnGKJnmjEJChGkcb2aTwzOvnvfhjox0GcR72nH8X1tRsHukTa1FEWPKc0LgacMDyEFJqaCZM2jaooxRMhwfGEiMcLFykRSrUVpZCLItmLMaKVxyqLWUquSdCRu/s7+q7DJiIh0c+OsjqRRLEGZYz4bZQlNqkDjouNcRx4/fo1V1dXglAkrkHmT2Xr/RB8kiCPrOoFZVmy33czcknMc3KNd4rdbsd6vSTg5+LEmIIQBOGwhTzHdrvl8uSxyCmt5bNf/orvfOc7lO+9yz/+8hcM3jH1PYWCVVOi/AhhwqqIUQqXrqevmqD8wYqK3/HYx3yIPxhHNK13xgi/piwrAg6rDcpYIbdPjoAod7TVqCzPNnrmkvz2i84Pn3hC3jMEIeLjPMN+S1kYJqePXorYChw/nCIKB2IaGPrdHCfvQ/LCTdeDLwpwli7KZ933vawL6qCOCpn0nBuRxCnSyZk3q1qIkvyUr80s31XRHN5a5nRHKRiG/Z676xveXL3h6vZa0pu/xvHN7MqdzB7L1OmdrNb85je/4b333uXt22sikbPzc9w44H3F+fn5PJJ47733eP36NdZaXr16xfPnz3n06BE//elPefr4CWVZcnp6ysuXL3n33Xc5Ozvjo48+oq7F1GaxWHB1dcV6vZ5j2Z8/f07btvzsZz/j/Pycb33rW7Rty2effcZf/uVfslqtxLtCH1JNu66beRWnp7JBv3z5EhD76LouZ7LkMAzzSCYfDw21mB87qxYywpELmrIsefLkCWdnZ1w+fsRvfvMbbm5upLCZJqqkeMgS2cViIehNjLx48WJGWnKh4JwTqK6uGceRPoW8ZVv4Y6JSZu7nQmjmbHiPThd7Jl2ZBDeHIK6j0zCgleLJySn/6rv/jKdlzao0VJVl43bcqJ6N2/Dq7oa31ztCLKl1zZkyvIiyGCoi0zRKEZPcF2MIEpmceA+FLfAeyqpm6Lbsdnvq05YYFKN3BFWgrCzut9c3AvsB19fXPH36DB8i1ze3XFycc3JywjYhQLvdjg8ev892v+fu7p7VYoELAWtEsXG77VBAVddsdj3DJEXb2ekZxhS8fvUGN+mZuV6WBTEqUGK0FUnKiihdtU6maVrJJrvZbDEKlqtGODJK0TYl/ShW5qQNsylrJr9nu9vOxaw1WuaiQ6DbdjxendFaCWKyuiREQ2DH6Bwxenwi8mUzK6XFYtmHQN8P7N2EUTJmMIWmQIzSpskzDCMRGQftdnuGfsA5z37fUdclp6dnKC8Jj+8+ecZ/+W/+Dd95/wNaXbGuGwo0yom7bIyOMfaQ0BYVIGqFKYT30BQW39S4EIgotCnwQdGPA9v9jrc3N9xt7rnbbjBOguGmKGQ7pRSlLWjqGuc9VstIqq6EXJ2JodloKyqNEu/zeYRCIp1aazAxFR9KoU3Lk8ePaduW25sb+v2WwkiKal1VlIX4wohsWwtKcuRdIKGL0iC44Hl7fcPLFy/ou04C2+JBfuiT4ygwj6MzUlaWJUM/EiPkVGXnPdYYiaKfxFMoeM+iFb8XU1i6fmC1KPAhstt3nF98h64fWCyW3Fy/pTw95Sc/+Qk//JM/5vLigs8++xTvHdM0EqYA0aMJkDxPVASP+p2C06/NzfjKI37J3+LRsx7YAn8oZONAymSWKYeomWQhl3/XMvINKUzPJcO1EHLJm4YSUVCJ/D+QPaDr9uz6gU3fiTHgNBLGgdWq5TKsjl7L4V3OXkSACl74NOOImybh0yWCf0yjt+hGojG4IXDb1uwz6q3EcyY/gQAcwqx9+HnJe8hrvUFGqT6Z4MQovCUVD8WGQpCNGAL9bsf97Q1312+5ub7i6wlfv2mxEQPD2HN2dsJms2V0PT44Xr78TGCcKF7r4zjy5MljXrx4wenpGetHa7x3LBcLkWUCb16/4ezsjKZuaNuWu7s76rrm/fff5/r6mrIsk/VyOX+Q2birKAq+9a1v8fOf/5yiKHj69OlskPWTn/wE7/08UogRttsd1LnyDCJHBN6+vZ5RgjyaWK1W3N/fs93tAGY5agT2u93cxUjolWdyE867eVPXxuDcxOTEGGa5XPDd732PTz/9hF9+9JE89naLMYbVaiW+I9qkkUkjzpNOuv/bze0MueZY+cvLSy4uLmbuiHAMfPLN0DMa471ntVrSNKdzUq5SyFzdCz8FvLCKtYLCMqHFZGoaKWNgaTXff3rJk/M1VWVp6xZrA/vdhmmMbDc9N1c33F1viBgWqzMUFf3QUyyWVCdrhn5PcF6SFoNPAURibDQ5h9YVzitMAWrcEnFUVcG4VxA1RIPWBTd397y9fsv7731A1w04Hzm7eMSrV69pmpZHjx9jjBAhlVJsohR73a6nMAVGafZdhzYlwU2crld89uqKZdMQlWG73bLdeB6dnbNetOybChUmvIe61FSlxgXN5DwhJZRmwhkJhlVaJdQmoE2kGgNVkJRUP+zTPR5o25phGjhdL7ndDRSlofClIALCbkGjMWVgdI5+nDhbndLWC5Ta040jPtZ0Q58yGzhIRYEYJJjr/n5HoTVWKWKYcO6GbuhpFw2rlSw42kgmRoiRYQp0yT9jdI6TesWybWFwPD054//0n/+X/Mvv/YCFqSiiooxKilaj0IhlfKmrOf9FeYheYdBoq8EqGdsElybAEsW9rAvOVi2PTlfcbrb86pPfcLfdsBuEDKq1IigoRpGuKuR7xorkD0Xq2ICQ7J91OMDIiHzPKGHYa5Q4KAZPUBqjIrrS1KakRHGrhBjZ1CXGGkgFhtKIvayRsKq8DhRpNKmB/f2Gl7/5NZu315iY7KBBpIRKiI8hZB+bQ1Ex9CNFWWFtgUvjHJU4JD4ifAGlmLwggv00ylozTol8HYTzUTR89uqKdx6fEUaHLgv2w562bfjxP/6Y88tH1E3DTdcl7k4eo4mlvBiUBzR+brrjlzTzv7X9q4ejhd/3fR0jpOeK2Uws0VI1YKKMTzxRPs/fuTN9vWMuxlNzBaJ8mqJHaUNII0jvR2JwGFVgdME4jATvxD8ob5lBCQKgBdFTcSK6nn57z/XNLW9v79hudwz7LTp4Hj+65IPLDw/vH0HaRcoGShnc5Bj6kd39jv12xzRO4sYcw3xjZy8WrRTGem4LRb97lzD1YBsUYjcOibyq5HpTWlAalHyekwsMTooNqxW7fkjx8g60E7fUXLjoFEinRAlXVSWLtmXRLinvNozun0CNUlUl6/WKqq64vrnmJz/9Mae
npyiluH57gy0LVsuWwjb88qNfSmIqcHN9jfeed999l5cvX9LUDSffOhFZV9fNEtFXr16xXq9ZrVbc3NxINZ9Ievv9Hq013/72t+dU16xmUUqIhp988skDvoFAlBv69gwgqWMWbDabefyQo+NF6hrmud48Y1WK/ZHXRZH4Gip5RKCS9rgs6fueKZkFXT66nMmuV2+vuLu/5/b6JrHqhTcyDiNt087jlixz2263Mz+jaRru7u4kdv7igvV6zXa7ndGOQ3iTdNbjKFIpiHTdnrquUnXsZ7Z8SN62Ep7m8TrSh4CL4oVgfaQtDI9ry/cen2FsxNQVqiiJfsQEix4Nw/2A246EcSIoT2QkKotCCp+6KsAXDGESRCIGgfe1RKNPzjE4QBVE5SgKjbGBEEeRZMVImIBoGIa98CN0wdub17zz3vviuLndYZOq6Pb2lmkS7xc3DYmcG6nLWtjY2y3d4Lh48pzHpmCzG9gPtwQ/EYNj6CP73Z6TkxVnZ2uCHxgmCTqPcaIqCwmaUzp1YIoxRbgXBRRFhbEVxhTYQmPKFnSFLhRV2xL2HU1dceItN7c7lqs1951cQ4vlksVqxdvba9q6YnN7J8+hYPQBW9UsVisiik23IxAorcEFRxa4SfmQURdZXN00pqwOJe9lPzFOgb4PaFsciJzes933aXwSWZ20PHv+hPNmTdyP/Ok/+wH/4o9+wEXVUgZFiRAzlJIE24gQT3HSgRkXKYOSRTwEgk+ZO1pRFyUhgiNiyyIpY0ZsYaGueO/xY5ZtIyZrXcduGiTMzxgqKzJRMYZTkDKDtEL4CSmxNCpFNIjsOC3WBj0rXoiglSFqGU4HHYlFSaU0i7IiEtA6stttxVzKKDEysskTQdvZEbcsCjSR7d0td2/fcnt1heslEyiqiE7+GLlznJUSSoyzlNLJGn7CGIuCNKqV0dls1JTg8bIsGd1EVZRUZcHUD4DBB8Xt/Z7FskFf3bCoDZqRwkT2w45mseT11ZUULW5CW4OxpA7eYYNH64jVSAqwvMpvskX81pFHfA+/mbUuyaNSyXOpXGxEEVzGGGVP1//LXkM+hMwZ5+Yx2yIMUc67c4Fx6HHjlDp5QUKnsQc8UzRAO7+xmPg1KI81YA10m3s+/+wTfv3pC25ubqRxswrGDrd8Pr8WjTQImYCplZYmcxzpdjv6/R43Odw0EkMUsn5GoqMSNV7suTWObndPcCOaEqIjRo0yRXr8FMaW0X0iKENUhohBWbENGEMUfhQTQTmULolKcokAfPLEsYVhvV6xXp9wt91jixrvj4ivv+P4ZgRRbYloXrx8zenpBeM4sNvLRrzrBs7qllev39A2Mn+8vLzks88+Y7Va4b3no48+om3bOb11miY++OADNpsNSimeP3/Or3/9axYL+f2nT5/yySefUJYl//yf/3P+4R/+gY8//li0yIi6ZLPZzJwN6eZXs2Q2JhZtJpB677m7u5s36NVqJV1FQgIisN/vZ45GCIG+71M+hiEkGFSuj0hRllhrZkfSbDneti2PHz/m85cv6RI3paqquajIqbBt2/Ls2TNWqxW//vWvuU5FWSah7vd7ttvtXIBlCfF+v5/fQz6yV0ce5QBzAm0eoSil8NETtcwho5IuenKOMThiUChlxQBKK54/OufJxbnAyMaQ7B3ILfQ4jnT7ThauogQkOKuwiugnvOyjKd3UiFul1hLchsFHhZ9Glm2NViNFbWlrCWuTxFSP0kt8CCLXDDGl+npOTk7p+57Hjx8nro2bP/cMP1+9ueL84tFM3lotl0xhy831W+rVGZcXp7y8ektVFoxjwdiPvL29Y7FesViv2PcdYRfo9h2BOx49ekRZ1kyTZIMYYyjLAmMKtJIZpzEWayWPxnsZY6hoqJQRV1Jrca7HuQk9jVxcnOKv74ha0J6nT5/y6aefiiJJKUzKiVDWYqqS0leUbc0UvRAf3ShS9AeHlB3WmLnoyHB0CIFxCgyTSxbi8pp8DOz3O6yC05M1T59e8vTxE2pveHR5wn/2L/+C8+WaRhfoGCiVSIJj5iIAKPEUwQdUKkSyj8uQRn5lWdK0LWhFqRVFWQq51jmmEKis5Wx9QtXUNIuWN3d3qM0dEajKCefEa8PYQwCbRglJNPM2bPaWkcj2EANz6kPyU5Bzks3+AlFLWFexMKyWLWVVgIq8fPmCqMTnQKk0dkkEzWzYJ6Z6Mo7d7/ezU25M953mQALMs/F8ZH5GNgMkPd7x6GDmcMA8Rs0N2LpZiuQ6RKqq4ubujtWyRMeaqljR1hVlobEWTtanDKNjv91SlCW7YWAYHU2U5kAKLPExmTyoGL5BqXHwdPjta/G3v5WFtQHESRZRdUiWBwfoJHMP/gCHSj4sSunkBSNj8sH1KK3E1bfr8ZOkF5Nkp94NKBw2FsBZelkxkVdnLqs0vd6x33V0+064YCHgvRChH/AzHvx3DpZzDMM9u/019/c3sv4lFMxNU0KD4jzmiMpjbKDr+qTCIvGRspw4pnN9ILHmQgutMVryybSSwmdyaQ/QOsFRIZV/h5lPVVfUTUtZ11RNw2KxxNiHe9FXHd+o2NjtO7Y78ae4vduwXq+whaeqW84vLjm/uMBNjt3mflZ9rNdrqkoMZfq+Z71ec3d3xzAMPHr0iM1mw3K55M2bN5yens5jizwu+eEPf8gvf/lLfvzjHwt8mLw9tNY0TcMujTZk4S9nqW1ZlrRti0kmY3BgNi8Wi3mzzpuxtZIweJxhAHKjLxbC3s8dhmycEzEEnJOurK5rTk5OODk5YRxHXrx4wdurK5q2nWW5Q9cLoTYVIzmTJWe/ZHTDez/bur/zzjuzl8br169nM6GyLKUbtHaWz3ZdJyFuIXB3dzcTfvKIaNZgW4ncFshfpIhRKZQuqYymVoZaw9OzM9Z1RV0UVEVSsfgU5a3lwh6miRCFqa2MMPgtQap9XWJKyQ/phl7MYpQCY/FRSWImgdJGnBupa01TCyw9GYEn0YJajZNDxcAYpIq+vb0FRGacZb/5hrq/vxcUa7Hg8eMnbDYb9vu9jPEWSz5/c4PRKtmW1wyTcCiUtgwu8Pb2nrPTE9ZnF/TDgAt7fDcmlVAJRjN6D4hdfWErubVjfg0GRUyfY0RjsEVIhaYgVpeXl1zddSxXLX/+L/8V/+Pf/Dt8hLvNhstHjxiGnqkfMEozBMfnb99IJ6p14rBYdGGJw5dAzOkbZSGhUH4SQuA8g1VpnOEDJD5NnKTIPV0tefzogovTM0pjWeiCP//jP+XRyRmVLdAeKSS0IBgqPagSaANZx+Te0Wgs4qiqE3/IJHVHWVVEFRmdYxpGVIgUyqCMLMyjUzRlxcXJiSyGQYi2rshFqHRdhiiyQq3EsjxxkKJW+KjQIRC8muftstgm6y45GeRIc4iExMUoixKlYdG2DNMgRmpy6h5IDTOx/O7ujtevX8+FwbH6RH5PPbhGgfl+P/hAhOSDk0irQXhghygBaVRyMGSMkX4cWTUt09gxJb8Q78FNHpTB+ch60dC2pRRaURRCMfFbrC2oqlpQ5ERwDCqQNAkzv+T3HbLpfv
2iICe5Cg82cxbSJpkll/xhORvEowh2pWQEOY0M/U7QQBeYRidKLyA4TZh6nB9RBIwqjh7qyN00JnZ2VNJspPgBa8uEtPnEn3hwBuYiQzBSISQPw5b97ob7+6tU9KRPPkRBhrPSBHkvu32dIgeyg66Zz2W+L2XSmII6rU2k5koymoymMAptbPLLSO4qidOXc3rktcorjQq0NiwWK9anHf5287VO/zfjbITI5ALLVctut+Pk9Izbuw2lD5ydX9L3A5eXl0mWJ4qLZ8+esdls5k0yO4SC8CFy1kkuAOq65smTJ3zwwQe8evWKv/3bv52LiLYVCGscR+7v77m4uGC5XIoEbL8ne2I8fvyYuq75xS9+QYyKTdHAhVS0uVDIyo9jm/BMIM1z1BxxD8xR87mT2e12+OBYLFpOT09n34+3b99Kd+M9iyTfjSHglWK5XFJVFe+99x7GGH7605/O3XhW02Ti5DvvvMPZ2Rn7/X7eLGeL72maz1kOksv/tl6v5/PTNM28oGX1jFKKKUbGxDeJyRZPaSiNptKKOkJrDc8vzqg1VFpRaGExhwyBKtK4SrJNMJqgFEpFdJwIzhCNJqJRukSbAm0F7I9onI/oyTH1G6g9pZm4WLcUWn7f6Ii2Fu8c/SCEQ4MEnbnJcXd3h0025dM0zWhUVg9dXF6ibYk2GmMNzjuatiEaw4cffsDHn75gcXLBB++/x+t/9zfCAagqHJpXV9coW9A2JcqWFFWNGzu2my1NUVBYI06LaVQ16ImqrKmqhpzcq5SMPYWU5rEhIK6MirZpsPWam80nTOPAP//Rn/Jn//yf83/7r/9rYoSmbUUxNE3iOWEMb+9vKeua1XJJ50Y6NwoI/RXkeIVI1dumgcLTq+wMmxw0E/FVJWKwCYG2XvP08SWnqyWLoqRWhm8/fY8ffvAdCqWpMBSijRM3MX9gvEcOsubs4yIplkpcC0dNU8um1iTn2+1uS7eVsDytjVg7A3VRCHHVWOp1Lec7eCYnzrbepSRZbSiMplCKQmkKLaoYFQTBccGjEx9FZ3RHpWHT7AwpC35QmognoGXTdAFHwNpS+CcojJVsEhCE4TjPKMvX89p2XFzMjeEXCo1jZ9+DEZ8jprRhWS/93OToRBDPv6e1put62qqWZGTfE73i7dsb1svnXL+94/x0yWbbcf32mvOLc1A5eyXiJodWxdy86bQGOj+hdCFJtF93nz9GI37r3377Ip2tvFJxGOZiQyNkn0RuzCOkP8AhzeahsBvTPrDbbQlOioxwMC2FGBlSsaCQ5unBY80sqXSzK2k4QoRxcmy2O3AjCs/9rhPPmuNzovKdmv6KOHI6NzCNA34SKb7OZmJx/r9UCFqcT+GGysyE1VzI6JhLh7QeGCGGr9enKFtLLpM1VFazrAu0NrORIiFgklomX8JSFIsKTGmDLSqqZkHR/xMgG1nGKSFM4u/w9OlT7u7uAOa49rv7O1bJOfTq6orVasXV1RXL5ZKXL1/ywQcfEGPk888/nwmZeeP/wQ9+wP39PZ988klKVpVNNgeRZT+K9XrNMAwSSJSKkGfPnvEXf/EX/OVf/iW/+tWvkgRVoy8PDN2MGmRmeDYCO85Cadt2Rg2yg2hWxNzd3jKNEoKV/UByfkHXdQx9T1lJql5WhETg6ZMnnJ+ds16v+eUvf8l+v2e32x089Y98Od57771ZKfPq1asHipdxHMk+Iufn51xfX/P69et0wapZ2pu5LPk15E4shMAwjbjoxEAG2Tt0jCg3oQhYAq0peHJ6Qmt1mhcG8EIoyguepF1GQtLmTz6Agba0dH5i6gNBGWwlWRkq2SAHxKXUu8C095gVXJwteP/5I/p+wzQM+HHAFJV0YUFhbIH3I9vNlkeXl3Rdx6NHj+brJiNe+Xo4Wa+53/Xc3N1CEB+S9XpFN04UlSR1bjd3PH/yiHefPeHnH3+K0pp+HPHO8fb2nmFqwZRUzRKI7LZ7Vm3DerlApzm/wJoy8zZu4ji/wloZs4EQtqzVXFyc0PUT0166pdVywf/9v/lviMZydnqK85Grm2umkEzaqpLVYsl+u2PwE2rouL67FSSr73HOf+karxAFhEFTNzWrdkG/33O/v0//ZkDLYuS9h1SYtGWD9pECzeXqhB/90fexLrJsKrQPWCU0QpHMITB4kPOgo0gnbbqmI8JyJ0asFhlr7uR3ux33d7fCmyrlWh26jugDUYnXRTQyclPtknge2HUD0zAwdj0oRW1LClOgo8hSC20wUVA3n0ieKoqNvIDBspDGBOPnnhKA4FN4rYbEWYgpO8hPYQ6sS44G9H03GxPCYdSRVWn5ftT6iNuiDuOU4z/zZi+PYyHK+LYo7BwKCeK54ZJZXh4btm3L3eaeRxdnFLrCR4fzE/t9j10U7PZ7xgGCn6jqBpsMA3PjkL0+lsNAsRYTPnn1c4X09UYpx/OEL16LX/r9Q7GRR3HzVqkOO9wfCNM4fqGz82vfdey2W3abe7yf8C4mgy6VOD2CJsToIToK08yPEqLwKPKPi3W9Ypw8o/NSzDlx7lTRidrtgUL0UGQ8OMFKCPFFUWO0GI2J1b+bz0tW7BiriWl0KenG8mWtnt2bM6qhFFhjKauKdrHAK5H7KhQ2xRIJp0ihVB6/MifA5hc7OZ/I8IbRBWxRc3H5+Gud+W9UbAx9z9u3b1Gpyt7ciS59n9CAsixZr1asV2u293ezkiTPMM/PJSr5448/5lvf+ha73W7WCT979oyu6/i7v/s72rYlW4RL2JmbRxtN0zyQmebMkB/84Afc3d3xP/wP/wMvXryYK/+iKH/rfVTJMhwOCakxRoF2089kZCOrWu7u7uYiq6wqlqsVxsiimQsYrTVNMvbKEtlHjx5RVRXvvvsu/b7jZz/72YzmLJfL2cG0rmuePn06e5Dc3d3x+eefzyZl4zjO5/jk5IQ/+7M/wxjDb37zm5ngOgzDLPXN7z8XTnnskjkNROketIZSIZlY3lEoqIziyemKdV1QGQVuYuo6wtDjBnHmDCGIpFBZfJgwRUFQGkWkrS1hcOzHCU+g7zq8MjMDXDYgj8fjVUQ5zTuPn3O+rnkz3mIKxVRoQnA4B0ZbgmgXWK1WZKfUTJa9ubmZk3OrquL09JQu5dYMk2eaRlCRsrS8vrqiqEfee+cZP//Vb9ht7/nw/W/x5u0tr+4GohLJ7f12iwuBvttzulpQE7nd9dzcbGiqkrKuGSOEIHkm1mb7d/ExARJULQvWNImDZFXVaFsTzch6vULFQFEYusFxslwxOeFO3O8nTk5OOD05oW0atNF0uz2nlxe4zwO32614W2iVkpJy9gb4KMJFMfRSlNZKiJgtiAS2/Q5HwBSloEdpzOGGkTCOeKdYX17yJ9/5I56cnFMHTRHAKrHrDkEyFeaxnJJGRAUB343W8zXiQhDFlnNEEIa9czK6i5GqErvxYRjwbiIi8LaOMiLxzqGCp7IlJ4ulOJCOI8M0UZcpnVlpQTaUWD6LH4AWV85pwiuPDhCjjFZy5yYUDiGaOo/Ae2nl10gibV3W3McNV1c3gNhYOzfgwzSvQ
5kTlJsGOSeHQkLFL/w9ba7Zbvu4+NCJOyCNkGIOkUyNQuamAWJHnTwS9vsdTSUKucWyZXKOXec5P1vhho6Tk1O6bqC7uaOtS8qixugd1hQHsrxOhbJJCKYSbsDXOeZG/Wsfh2KDzCNI6EgODzs89sP/zoXZV7+WXDCEfHGmZ1Sp6VE47+n6jv1uT7fvGIdOxqAooo/pdchm7f1IjI6yPJCjxH9DjOLmjVmbxAdRxGiIURMxGCMuv8cnSArMRDj3UgxbU1BVKxbtGX0vFuTWGLquh8lTlpWMO9L9qpTcU6MLCd3Izp/Mo6D898IYkYFri7EFRRFQpgCEV6J0GmunMb4pSqSV0IdzGuU9uxCISqNtiS0jwf0WaexLj29UbNR1xclKOvnzs3O2uy19t2fYd2ij2W/v+Xdvrzg9PU2+82ruxler1TxOubi44Obmhr7vWSwWfPjhh/OYJEtYQwj85je/mUcjuZvON3c2zsrGV3//938/IwXATJLs+wHXCMwT0xw9FwLHvAxA4pgTJyO/ljzm6PaihlikdNkhOSNmu/A8Bjrmkzx79oz1es3r16/5h7//e/pE5MmISvbWOD8/50//9E/5/PPPGceRzz//fL4gMwLjnOP8/Jznz5/z9OlTfvzjH/PixQuapklBS8JbqapqHskcK1ZywSbqHmEGWg0mgvVQaqjKkgbDaaF5erJmYTRqHMR8KTnNBSdKEYVUyrY0DE6UDTHdyJVRDAaMCkxewtBMWRO8QO2aSF0YlnXJe5c1P/jOuzy7WKL8nkWp6EOgsColE4qrpFaBpihYNiXeeS4vLwHYbrfzgp3P+e3tbSquhmTlLTdU13eUVSFmYGiePrnk5n7HhOf5s0fcj6+4vd+jjGY/DNxutux3E5PzPDpd0y6X9N2Ou/sdq0TYNMk9dBh6iIq6bjCmSOtKxLswu3gqrZlGRzSWi4tzfFRcXd8T9iOLpsJHgWMfXVxSVDLCu7m+4VbfArBOKckqfc7eJVOueVURF0+hTwSCc2gURTK/asua0hrqruI6cXx0UVCXNS4ZkG2u71icrPmz736f7737Pq2y1EqisrUWWd3xQjab2aUMhsk7xmmcSdW5qCgSJ2pKqpOqLPFBTMmcGxkGIc2K/Shpg4gQxALdKjhdrSFKobPf76kKybKxxtBUNZUtsFpTaIuPnm4YxFtDaSJWguBSF0gix2aljFagfEyohmyCVmtUWXGyXDOMI1dvr0QargP9MMzZQsfFQl6fjqH/jA3kcUheY/LvHB8H3s/BG0ilzxUlm0UuNmQcFkF5himwXtdEWwKRqq44OxXl3clqMRPgldKMw0RIXjHjMBGmEedk7VIarJXxJyHzEr5uwfENqo0ECYT8e0qn4K8jlCM/3peNYX7Pc6n0e8fFklynSX0zTfRdLwjb/T1j3x+KBZcSfRQYowjBAY5JuS88AQ/uPW0saENAS5idMuiiwFoNSVwxv/30IJk8rJXCmpKyWlI3ZyyWkllkjMXFLUFNaFtibIkuoCwtRo0QO1CGEISHFFKGnFx/mYIrahRjxbZBG4syTmTjUSTdSmt8GjV6oogBHnzu8jdjC4wtKauGpgWnC+L4TzBGOV2veB4eESO89+67vLl6I5Hs3mOLgrquePHZS6bJ8eb6ms1mM3fXl5eXlGXJ1dXVzIXIG+7lpchEP/3003lDzFX/6ekpIMoKnTsg71kul3RdN48Q8sw+czLyDVnX1QHdiMxFS2Z8Z/fQoiiYEtM7v76cswJIwFca1+SRiS3MjLSAXMx1XbNarXjy5AmPHz/mF7/4BS9evJDFPN1AmV9R1zU/+tGPZnLn1dUVwzDM0GnOYrHW8uTJkxn9+au/+iuurq7mBNg8Isk+JTc3N3z22WecnJyw3+9nfsq8SCVPABuRHABgaS21KVjYgkeLmovVkkoDTmbSXmlRr3hHSFp/mQEWDCPiPaANWkNhFaWXWHNCwE8yXnBOGNVVoSlV4Hyx4AfffY8//t57FCXcbbc0hWbYTxiVZ9iBOAUKNE4H3r69pipLzs/P2e/3eO85Ozt7YKLW9z3Pnj/n+u4egphwhajYbjZUVZVIfRvQBZv7G8pmwePLc17d7tjuOoLL8klN1HC77SQDpGzZ7fZ0g6N2HqwTB8co/jJaTWhdUOf5Z/QoJQtDUWiqSjgk+2Fgt92w2dzT73e4MVBULePk2G8HqrZhvVgyTiP7rmMYRkxhee9b30IhCOF2s+H+dsCoHF718FAxFYOJtJuLDtO2FJUQ2G7u7+nHSWbDPlBpSxkV33n3ff7ku3/Eab2gnjQ2go5qtrh3wQuqooS4ljdwpTU66jnN1AXPOAworYWA6B2b7VYUKkCMQUL/YsR7l9AyGVXM0e9HTEGrxFHVrx2LpqVuasZxojCGVbugLkqsNiKtVZG7/YbtZoPzUhB1fU8/DPgMbMSQNiOVEmcF0tcoiVrXCq3h5OSEaDTjNHG/2QjcnHkUqbDI6wowr135v6N6WGzkteLLjgeqgXgoNmZuQHoclxKCx6nHGDmXXb+nrQwXl2e0TcHlowvqwnD1+gVts6LvB4iI34yK5Pwl4WokHxyrCDoTDJWMT/liQfSHGG7I5jh7nuTiIiTOQVSH8/WHIogqUhS7hIy5FErYdT1umjAmYpS49OqkWCmsDAtVGlfMxxEPJyudIgpjCuq6YbU+ISqRFlujWLSVWA3Mv54/y/zeJfW1blcs1wNBicJIKYNXJex7iqKmrGq0MdRViQo7xu6a7GYcA0STzh+HK0a8U8BaI9HwWTCQFWVRQg9Duo9j/Apfk3QZGGsp6prSQa0sFP8EPhtlaalSJoDVEKYRqyJPnj9l8p4P3n+fxxfn3G92fPbic66vb+YNe7vdcnJywjvvvDNnnnz/+9/n9vZ23hgPdr/SGeVxx3a7nTfhXCQcEwRzp5ARj0z6lG7fPrAWzz+bi5IcB71cisQyK16UUvPryJyHjNJ0+z02Of9Ze5Cznp6eUtc1H374IR9//DEff/wxd3d3s8ugd36GT58+fcq77747h7BlH5FcaGUlybvvvjureH7961/Po5yMZGQPjmfPnlEUBT/96U+5v79Ha812u51loXnREsjXoHxET55aw7qyLMoGExXnbcuz81NO2gbtPYWR+TlR5o9+GqSYmxxaG+qmoYvMFs7KKAplKEwU627lcX7CjTlESGF9ZN0u+O67l/yzb79LWyp87KlMYOg7dPTJuAaB4SeZlFslo4hjvk42RwshcHV1hXOO9XrN6ckJ3TQx3u+wtkgOgZJquVwucPdbiroQn4Kxp9vtOF01jI/OeH11Sz85lLZMocd3I1e3G86WDVEXdFNg4cRTYhwdSlmMFhJq6DpAJc5MCv4yEg3unaesKsqiol0u2Pcjd3dbmqYiKoNVwjGwSki0ZdmwaBd040BUzFLpX/z859RlBYsF3TCI8c/cK8lEtywKFk1DXVYy4knZIXhFpS2X6zPqqub2fsNmu5WiYRz58Hsf8n/83/8bHi1PMVOgUoX4DuVFP8g95GKKWNeyKWpy8qosdMd5Pm3b
YrQmRjNLzSc3MYaJaCB6z+hGfHComImbUTbAzJOIAe8mCqVYNg0n6xWnZ6cMw4hWikXVUFqLVWLLjNG0fcu2acWJdHJsd3v2KWJ7HEfGyT+QJIboxVMDnYorgd3LwtJUnvfefZ9PP/uM129eoK2sKfmezYU8/Dbsz1cUGV85DvgttENUP8dOlUDyzJEUWwXEOKG0xpaGs7M1Wiv++E9+wMcfWcZhSGm/JCWdw1atqBPKMuV+iOdCiOGgMPt6jI3/mQWIIjNQ87Oo/K2jp/1DEURzQZPJsEppSRs2ct1YUyTOhqDTwUsCrtYGrYNEzufXpJhFKFKISAFRlhWL5Zpzh3C9sgeHVg+KDeCIYCqfpWziJVXb0BClIFaG3TShncfWDdVigbWFONm6gJ+kKPHJwCuPUh6c05jPqZrfj1ImeeBkhC9zPg6gYn6Vxx9G1BpblFRNSx0tXo9o/+WF8xePb1RsqBgIbhSL59tr/NSjUBgV6VK4lYmBk9WS6tsfYu0nc7EwjiO3t7fc3d3xve99j/fee29Od/XeCzs9pXB++OGHswIj/1v2kMiEzRAOIUYgChVgLhzy6GC73dGpfXoDh7lp9rPIMtksOc2Pv1qtMEaC23LB1O331E1D3UgM/TD2ony4uJh/tus6fvrTn3J1dQUcpJnDMLBoRbL23e9+d2aw/+IXv5ih2MxsB+EmrFYrFosFNzc3TNPE7e3tXFTlscjp6Snf/e53GYaBzz//fC5a+r5/kOmSuyWtRY5YaENVwrKwnNQ1lS6oTMnFyZrzkzWLqkRHh0HIo9F73DQyDcPBVyQKW19bcZxT2oCSWW9pZZEuXMDFQIwOFT1xHDGV5f2nl/zZD77D+bpm6t4S1YjGEdwgXZeSPBXnwTlPwFPoSFXIDZuNz1ar1Twzzxbup6enbLZbyrJC6Y7Vask09FhjGKeRy/NTXr95g7aW89M1b2/vefzoghdXH6HChLUaYxTTFPEexhC5udslZ0NF7D19P2KsJUZBc4gG7xKZDCkoy1LGKeMIq6rGTyN9P9CWhSB9dcvbmzuWq3PeXN1R2IKqFNRAWcvoHCqKrXmdrqP/4t/8F3z081/wd3f37JzDJv7K6Fwifioqa2mbVoLgrGXRtDRlKZ9d12NLUdScrk5YtCvhCN3ccFJW/Ot/8S/5wXe/RzGM1Npi48GzIsSYXAbzZiQbgQ9+7oQlrdSmJNmRwhbUlbiKuhRGOAwDPniCCVBo/OTox4HgJMBO2xSbHWUE5hHeiwZxWQ2GxXLJk0eP5TqcHEZrSlNQKD2PdZqqxJrTpDxw1HVD1/cM6X7s+lHUTc7hg5PkXOdEZggEFTHIgmyNZbFc4VygH7bsu3tC9PM9ldHFL/Iwjjvz3ETk48uKjdyRPvieOmySx9EDxljqxqKNRwVYnSypCsPd/Q2Pzpfcb2751ce/5MMPP+Af/v7viTFQFiWxael20ohQrI9GylJsoCLBh2Rx/9vHl23+6iu+/1WHTLKkq4762NTroYLnGA36X3ocj7GkIRUUomnFh8maAj95plEaKWuEA6RVkvJH++Cx4FBYiqeMoSirZBRZYkbPFAJoTYge5+MXXs+hkNRaYQrxP8JEMInIbJT4xhgkgNGAN0AUTxpjCll7lU5eP1+Y72RaTEJdRHGSUEXvZeyaUA0fUhrzF863UofJpjGSYr1YLJkY8Mpi/df7fL5hEJtnGnrauqLb7ygSOtDttlitGPY7iQBXFqKhLgvqpqEsM6lFs93t+MlPfsKzZ88oy5I6ZYN471mv1/R9P49JMvmyrmucc+z3+xnJyGhDloEdS82UUjPCEaN6gMLlm/bi4kIcTpWkrL59+1bQk4SIrNdrbm9vRTWy3WKSm2dV12w2G2IILNqWqq74V//qX/Hq1St++ctfzoqZY97ENE08f/aMD7/94WzF/umnn85mZrnQypr61WrF06dPZ6Qik2HzYpMvhouLCx49krwVkPGOtZbJOfqhTzM5uZBjCJLOGSPKBQqtOFu2LEtLpTQFmvP1gtNlQ1ta6lJcJ0V6Jy6VbpqYJpciwoVPoYxwEaIiWTIf3TxGsiSKEHHOY1WgLDSXp0u+9fyC02WBH3do7fFRQofEdlrktUYZlIop9TLgrMFbxd1mOxPnQhAL/Rgj4zQwDCNVVbHZbLh49Ij9bsf5yQlXbyesNez2I+Mg5NG+79G2pLCa0U9cnK24ur4RYlmCFpu2oR9G+snx9nbLojKcr1qGyWP6QNMsZodIo41o98eBSSuMlq3DTSMFkbauMIUgd6/fvOHm7p5xGDh7/1T4KdHgpsA4jVRlhS41jsgwjdRVxePLR/zVX/3V7EdTFGU6r7IpEyLaaMqipKor8qzWFgVKa+7uNry5ekXZ1tRtw8npGcu2obaGVmn++MPv8K9+9M/FhjxErLG4cSK6IEUGKTI+RazrKN1f9qwpQrZyDvTTiA8yXlVK4/zEMI6JhiHFgEBmsi6G1BmiRE2SNy/ZrKW5UMnXQytoqpKzkzVumthtd+ADdVFQGIsbRyYf0bqkLkXy26leRnrjiK0qllXN2Dr6rmcYBylCvBM1RggEpdJ4QmIaCm3Aey7OTpjcO3zyqeSUKESe6n32zEiKm/TGolIp8v6wCB2Kkt9eY8NRwZLXq/xnRkUfhiYarNX4USy1y7bkZNHip5F2UfH5yxecn6y4vDyX3JtelGrj0DF0PaY26XlDIoWKO67cg18+wfjyvV+64q97SPOsiPikZsqdvhL3VyUmYzOE8GWP8RVFyGxcxcMBkDZij58zbeo6pYsHKXWMtgz9RNQymq5Ki1aIFX90mK/gQc6fUQrYKwr5XLRW6ChIQvThAfol/hWByKFB0wmV0oVGyVQaY6BuC1xoUMZQlCohlYqKSOzLGb3P18cxFnHglEBpS+FSRhlPy30sTQMpdTnMGOlM4RWybFTJ0FHUKkYbjEpI5tfUR39DgmhLUzdyg/lkhZpgl/22Y7V0DMOEVoHKljQK4tBRKSjahnGaePqdbzNNA6/efE7wgeVqRVOJlDV7XHzyyScz2TErQXa7HRJ/XjKO4uUvccxCegKdIKDAzc1dKiI8ZWlTtykn7uRkxQcffCAGTVXFT3/609nRs65L6lbUHNc3b5kmIViWdZGQFU8/7Dm/OOXp06dcnl+wWCz4D3/7d2w2G+7v71FKsVws6LpuDln6l3/+L9jv93Pn/R//43+c+R19389up5eXlxhjWK/X/OIXv0h+DWoeF1VVhVLitLper7m5ueHFixdC/vJyIfTDxBQmopFFzijQKmCJ2AA1imVhOWtL2qoUdEIrTpcLTtctrYmcL0oaqwXe9g6UkKr8NOJGsR6fguR2hAhFUxEKi1ORaCQLo1CaplYEDOgRr8FPA6erkj/5/rv82R+/y6IZwe0JcZQRVtAYXVMWYjPdDwOLukQBk/N0vacoa6qywAdHXWoCnt32Fms0Y7+jXazY7TsePXpMVWguz5bc314x7DcUqxOGfqDb7WnqGqUmbu82nK0W/OazF6zaGkKkKQu6SbOfHGgtrqtEOu+JIxT
DhClKjIKqkKLKjR1VrUQdQsSPkT44qrqhqkt2+w1dv6UoC5brFVfXV3z22Uvu7/f8/d//R05PH/Gt974tPCcVEykt4tzEatHy/NkzvA/85tefUFU1jx8/Q2F4c3VFqWGaBkyhMVHyWSIgA+MCh2IaRl68eUOsS/YadLenXC44tQuaMfKt97/Fv/nzP+d5u0D3g5AsR4cfRyFpIuFVqBRmVliqWlxus6XxvttRlBVaa+4294K2GMPddkOIke1+J11laTFYTGGZ/CQcCK/SWNNCcpv16fqPIRDchEURvac2hoLI/k6IwJVR+MlTqEhBmLkyPnlUhBgx00g5jSwIwvMoLBdNxWQN/ViyXywZvJPCYxzEbCx5e/SJ1GyUjAu/9fg5xit+/quPmAaPshVSigkROLWs6ChrjkfPS7g1B3Z/Hk9pfTABkwU+j2geGoFpo/FeENu2bSmLgkJrcCMX61NWbcGH7zzn7KTl2eML3NAxjj27zYa6rjk9PeHzzz5HRU/ZFMQtbLs9iwtJ6c1eDQSwiAoufMn0Pibk4dhjRQi3X58gGjikiqqYTRfBaymQA4rBRZxTWH5bUfi7juNiLR9KCR0zJPSmSMnadd2yiCVRyWihaCPj7Q2mLLClpa4KSquJfqDoPLzl8LoPzyhfKgBO4hbCAAgyPO56Sqvo9vuj33CENPJwUWT0piwoCmkilotlQrAEJVRR5OplaamqQhRwDsrlUq4x7+fiNagkbVfCVQsppbooK9q2pa62rBetoIlp/S+N8DfQSswCMxcsknw+JA7DKouK4PqBaejw48Du6H39ruMb+2yAmj028hjC+8B2u+XurqEsRMpmtfi3D2NKNk3ZHM+fP+PV61esTk749a9/zb7vpINKY4GsKjg/P+fVq1dzgmcmShqjv9AlyIV0bI6TYbI8bshHHrEsFgv+9m//9sFmDlL55hGBUmLClTkc3ntOTtYsFgvef/99nj9/zn/8u//ARx99xOvXr8lx9SGEWVnz7W9/e0ZtnHP89//9fz8/X4xxHhOdn5+jteb09JS7uzs+/vjjmeyYfTXquqYsS4mKLkvu7+/nUZO4venEJA4EgnQMUbrAQilKBbXRnNYN69rSmLTYaUtdVyzahqrQrNua01WLUYLKSFJDUoWk6tx5jwsy744qRycdmOXZJrsIlsJGKm9x0WNsybtPL3nvncfUpQI/oJSXTjbk21fg3JAUKbYs0zgj4mMUOW0MLOqCcZowBJrKJoKXmjuKpm1YNhV312+IzjHs95ycnKKV4uTkhM9fv+b0ZM1+37G5u+XJ40f8+uUV33rnCT/71Qs0Bct2wfbtvSgAlCciXiLbbqA0ER0tKu5pm5LKaqahoyhKbFESExIkxC9DZS3OC+dg33WEGHnvW9/iw6LhxcvPmaaRly8/w/uJui7pk5eEnyaa5owmZfN88MH73N3di2Gc94zTxOBGxrFHK0VtCqLzdH3ParEEDVNIqFFZYNsWFyeim7DawDhxuVzzL37wA77z/DllEGhbhQwTQ3bedZOf1VnGtATniN4Tok/jFEcYxKmQtEEO40gch3le7UNIzH9NqRRu8rjxwCkSbpQgGTFE2cSjBEHFGMFq3OiYhoFF03ByesKbV68Z3ET0BdoYeU1BGpEYxB9Gh0hTFFgfKBJqUxlLiSy0bVnhooxRukGIpIOfGKaJXbenH4d5k9XK8OzRU6bJ85sXnzCGSWSQJCVLjIfyIuZN6YiylzeFkBEPISFmV0ohFz9EQkj3Vh7XAHjnmGLkdNXSliVPH13y5NElY79BeU/bNJSFoevEGHAcRpq24f72LX3fUdYFEyblPqUMFpdSUTSp0/2ycuPhekt6n+JK8vsLjoxqJMkIOsi5EQ5EWkcixJkYkbpqfvuc/O5nYT5vGTshHhdvwqeq6pKoClCiyCiTYqqsLFVZUBhF8IbSf7XqItEhhAOiYkJlQlr/RRGVi/b8+pTKwyPm6kUQlWL2MFJK4V2DiXJu6qqiaWpBFoJlVE4QmC+M5ETso+b/Tr6gGMRpt7QWYpyRa6KQ/71zkpOjdbIqSJ9VepFGS7ExTSP9fsd+L9Lhr3N8QwdRwcmOi42sLc9FwfLxEjdObHshh93f37NSYh/svOfu5pbVakU/yGz97v6eXdyx38mmuV6vCSFwe3s7y0ozT0F4GWMajfw2LHn44I/nm/KVj9vbW968eTOPXzJfw1orrpuLlvPzc7z3c5FhraWqKn74wx/ygx/8gL/5m7/hv/vv/jt2m+1cTABzgfP973+f9XpN0zS8ffuWjz/+ePaAyGMVpSTN9fLykpOTE66vr/nFL34xXzRVVT2wZv+jP/ojzs/PefnypfBH0leGVaNPi4KSSOS8bRsPBZFVUbCuak7rlrrQ6OhQaIqqZLFoaZuKqrCcrFes1yvC9o5pnETSGgPBh/m9eu9x0UvwlhJvBS8fBCSGuUGKnEpbgo0UeNqq5Fvvvss7z54KGTJxAbL1rrWWSim0VWz39xijqeqCcZLiZhrlM9GqxHtFDJ5oJcBo6HuqqmS5aHFeouxvrm8gQts0jKu1jJKSu6X3nu1uy3q9Iip4e3PD5cU528FjNCwXNfdvtpyerLm53ZDHkj5CP3j2dqQ0cb5hi/USQsR1PaUHW1ZoU+C8Z7vZoBYNZV2wWCzEwt0Y6qZlt+uTokle/+nJCbfXt7KRayGwFcZilMyYQxBiZt1UbO4V3/7gA168ekFZWtww0G33wjwvbOrwZANwSW6tURKQFjWLoqJWhg/fe58/+vZ3aMsaP05oJODNaE0en+e8mezJkC3hp2nC89BoSumDh4T8nqNIn6+2BzlsCGEu7rOxmNaaIckwM7QvyJ0HIwqBIXGo+qFnMbUyew4hyXK9+ApYS1SiPjNKE5IXjS0KCYpLPAulxBCsC0IYtIWmNJa2boShryK7/Z773VYMDaeJkcC6bnj3yVP6bsfrmyuRDSZmXSD/mTahA1XvQIj8wp588N04Jpge+Bnpr/NalT+PymrOL84oTR61wHvvvEu/u6csNUVh6XuFS+f44uICP/UMQ09RlIRBlHnGWpnXe09QBp2alvAlEPkXC425GMIcgfBffcjZifPfZNk4KE/kNIakSAop0figTvniWv91D1EGxSOivJim7SeX1qA4v7oDudugdMAieUdffTyU68Zk0+BdgLTXPCARy1tNe4bwOnUiI7dtI+NYJ03eNE5MdmSahGMhI2qDUV4IvtbO6FhhzdG1dRioaMRrQwPRORk1DgPOjfgwoaJn2HcENx2MQx6+PfHRUQCiMOy6Hf2+Y0zcwN93fONiI6MF2fMif/B5ZOB84lbsdgzjIJ23UqxOTzg9PeXVq1csT9a8efOWJ4+fCFlzCjNykcmkwAPSVX5+qfa+iG48JOt88Xgw3x+GORAtIxHZ7+ODDz5g18m4Ipt4ZR+Qv/iLv+Df//t/z7/9t/92DpErkyIkS22XyyXvvPMOy+USrTV/93d/NxttZcJq5pmsVqvZtOznP/85wCzbtNYyjiNPnjxht9vx4Ycfslgs+Pzzz7m6up
qRkvyYkCrXtFBpEIIlARMDC215tFpz3rYUIRJDIkAVBW3bSAdkNG1Vsl611KVl6z0qFZey8MfUvQZ8jLgongqCvupEOhJjJwXoIMTSUgFWgy64PF3y7PEFi7pC+R3WGiQMNlt5kz7HlG4sI3xsYbDeoN2AH0aoC/b7PaumwljLMDmiMjRNy/n5GQHY77e8fvmCbz1/Rj+O1HXB/d0NTV3z5s0bqrJimCb6YUDFyHq9whvL6XrJxdkZn766Z9XW7F3A6shIYO6PInSDpypk0RzHARUDTVNT2oJ+HNHOU7crikrIjt0wEDVcXd+yPlmxWovvzJs317x4+YqqbLGmxJiKsizmAm6hG3bbDW+v3vD+hx+yXCz413/xF3zr+Tv8T//j/0QInrIu6Lo9rz9/Rb/rQMWZlByCp6xKjNE4d0G2wFbOsS4bHi+XfPfdb7Esa+LosMaiEWOzEMVUKxOysz+NMQafcjtC8n+Yu23vCC4mrkbmG+h5oS3rak5IPlZdIR/3g+cR98SDLw4pmVIn9GyfuBreuaR2iUyTwxqDi4Hdfo+KsFgshDS97wiTmxfmGCMxBJQ2rNuGManNFFHi65WMxZaV8CD2+z27/Z4hBMYQKI3GfvBtNJHXN1cpMD13koZDamHuzHlAItCpKNOpqNM6JzIfjmOiqXMOk3gumbRbVSVtW/Ho7ITL0xXj0BN8S1UVsg5oM69HXSep1G3bsttvGbtJyLBpVJPlxrnI+api4/h4iGx8vdl9RnfijPocCo6MSOm03oikU9aH42LjG3l6zC82PbWS5N6yLCirksppQnLUDFFCLYuyoKpK6joR5XVBEc3veOxDYJxcqokMHAMZZYlHpBbBszJ1M3l6aEE1lm2Dc56u65O1hNgNTMNAcBNGIXSGdH0pnc9nEDQoocOHZ5Ivg0KFKKZ4+x1d38v96kdUcExdDz5IoOEXz288PJZObJvgHN6PsxP17zu+2RiFOI8c8sadN1tjDMMwzMml2SMArUS6qU6pqpph3FAWBculJL82Vc3ANKMI2+2WqhJGb1aCZF4DMHdLXzyO0YzjilsUAam6M3oOiCvLkt1ux3q95oc//CEAXd/z9uZ6HuW0yQ30448/ZhzH2YgsP3+376jrmsViwdnZ2ewp8urVK16+fDlLdW9vb1mvhfX96NEj1us1Z2dn/OxnP+OXv/wlMcaZ1FjXNSEEnj59yo9+9CP+9m//FoBPPvmEV69ezec+fx3Isek8kOO2IzZGlqXhYrXkYrlkaQ16EtdObQ1FWVLZglIrKqs5WS1YNjUxeiY3omLEBeGDBB9nRGMK6U/vD6Y8csKTOinBbyqCRmbdVvHo/ITz9RLlvcgYtfj+R61RQYyLhBUtpmz7vsNNI2L9LR1AcBND11EVmhArnA9Yo1mullR1A0kf3+93LNqapmm4327Y7/bc3d7SLlesT87oug7nA8tFyzhNrMuaz16/5cnjR/zqN68oC8U4OAot8thh6Dm2HBgd7IdAUQpZ637X4XzkdC33x+QCcejR1tK0bQoTiwxjRyRKLk2UDaeta549f4fr63uc2zONHj8FTGkprEEXJYpIaQ3/l//qv2LsR/5f/+3/E50216ooWbYt0XnGfY+fphmJ2ncdy8WStmp4+qyRsLTJo6aJVVHx5PySR6fniDBEJ15ElLTZyeGddFO5wM0Nh9JCSnTO4UnXhg9ip61FxjdNk9y3SrT5Gf2ICDqqjqTemZNwHGA2E96iLOPBeyk2EsEPZCxjjCHonMYsC67UuNKsjMPA0A8MfT8rTbTMXqUICgH6juADKvgEIUuuiwueoixprKU1hmVVMYbAfhzYDwVNWTAOHd6N3HdbJoRcGvTR5htmVwWOxyhfHAUcNtP8/g/EULnfPZGD8kXrlKqrAqenaz78zvu8ffUpb6/ecHl2Qoxy3tuiEtQneN6+fUtdmDQ2mSirUjbBo3Uze0pE/eXFw3EBlD+jA5v064xR4vw783+T0ItUeMQQjz773+60f1+D+eXPK/+nlMhQrZHQx7LSRG3RRhPQ1EOVrOINZVGg0ZRWYf3veK5UxGRlkjEpNDH7taiHqbiSX6jRCD8lr5ultfiyYFTQ7TxDv6fv9ox9zzSM9EHWzrjytJWF4BO3J0nFA2hz2A+OPw8VJV3ZDQND19Pv92Ks5x3Rj4x9T3RORATAl336ClmzhJrmU6bMP0U2Sr5Rj4xssmSzKAr6vpcE1KomxMCu22NMMevaP3/5krKuUcB3P/wO/+//z/+X5XLxIBo9kyB3u92Dzie7kWb/9i+VjX0pg1tTp0j6GOIc9rZcLvmLv/iL2YHys88+E9vrRBDNi2C+qD/99FNsWjABMdTSA0+ePMEm74dPPvmETz75ZFaOAPP4BOBHP/oRy+WSf/iHf+DHP/7xTIiVjmyanUudc5RlyV//9V+z3++5vb2dCa1d1z1wKs0eIpkcCxHlxayoBNZtzeOTE07KEjO55IpYE1JAmdVgVaApNCeLiro0xGlkmgZhGo8BFQzBywY5OcfoHWN0ImlVJN6GLAAaWeSVrBjCAI9grOLydM26bSAI6TAGkWoqZQlBVAgYUB7sMBGiZ5yG5JwnMjRiYLfdUJ6e4Lxn3w+slwuqukFpw+3tLff3t0zDQF0UXL+9IjhJjD0/O0WbIpGBa0LXsVosGKaJTdfx/NkT/vGj3/DOs8fc3O1ABfYjTKWhN4oh7x3pehsdoIWw1W037PuBECKr5YK6avAhsNv1hKhYLCQUr10sCcFxd3efiuqGx48fEUPg7vaaumrxk8PqAmsMg3PURQEh8vb1a/7m3/01//izn3NzdcXQjzJzLgu+/e1vc7o+4fr1FbddR1WIP8xmuxW04kxJQRwi2nhsUdIUFc8eP+ZktUJHyUEYx5HoI0VRzo66GQLOYxKtNSrxm3yQqb4UCBFrpVssypKQcpRiFO8QrTVucgzTOGf2HHOq7Jd44sz3c/7vKFwFN02oGLHGSO5KP0rUfFQEd8g5iTEyjSPjMIhaKEmTjTHz2AWlKCqDQeET2hCDR0VBUAql51GfMYa2LDlbLenHid3Qs2pqVk3DT371C67ubxljQImQO5Fe3LxXzplaMXecpCL7t8fBx+uaFHwOlD4aLYtaq6lKvBvRKnJ+esZ9dNzd39DWFVVxOqcxF7Zgt93j1MGXqDTlrC4jjwFyAZE25q+qHw6jH5WQjf8ZaMOXHLnAOBRfx0XJb//s8es5fB/m7T0e+DMxowFKpQRUS6M0ypQoIxv/OI2YwtCUJcumQiHW+KWtjt89DzZyxGreGoktyAVH9CGl2MqY78ELTC9do9ExHP2uQjshiU/jwNDtGYeBcRS13TgMYpjoawod5/GwcCgzqnEYxWWE2CghWI9dx/b+ns12g5tGwKOip9vvkjw9f5IH5OXwt5hM+SL45Ls09F/rc/1GxUbf96LNhrnQAOEXZH6FUop9t5f/1prROXzwh04SuL56i31S8PzZU168fEmI6oHXRYZYs7rjWPaVnfUevIk0w8w/lx1ABaGoDx+q1pyfn/Pd7353Hn1st
9u5MFgsFkQlEtJsDpUdRGOMs2/FYrHgnXfemeVMV1dXfPTRR9Qp1TLzGrTWkoIbAs+fP+ejjz7i+vp69gtZrVZzdP1iscAYww9+8AM+/vhjrq6u5lFVRjD2X2D9HncYWom9rdZC7tNAUxrWVc1JU1P4QF0V6BDwKCgKrFGUVuazloAKk6gqUspm7xxTYqaHEJn8xOhGxugZSQgHER+VzKzz7CMKCqET9I0KLJqKy7M1VkdCmJKrJpjCEpGAH5zHEAnTNGeNhODFVj2NiRaLhs1GjKhyd+sSebWpC+5uryEEMXjSGqL0DX3Xszo5oahqUUkolWSTW1yQeWVjK77/R9/jJ7/4Ne89f8rw8UsmHygNVKWd1TckR+1ujGx2vcSR2wLvRhmhdAa0pa5aULDfd2ijqEqRqhqjaJo2Zfv0OBfQygm5VQHeMY1CxFSFxU8jTdvSdz3/7f/j3/L69WuJtsmFAJG3V1ds7zf0XQcRdluRpvvEZyiLAlNYFmXNwhY0Vmzy3333PaIPktobZEShtZJsk3QPZcJmvr9Exp44XPEwcvAp4ThzG7RJDruJTOy9n4sTY4x4WqT3IMFjxVzkZ25IiIcRi0LGtREJJYsxMnQdI7KIKltIEKItqMqK/v7u8DghbdYxzrL0aRyJKQVXBY9PxZOxhtIUgmw4LyqAukIFj4kkMqhi0S44aRtOlgvO1iecrFf81X/8Oz598+rQrc/FS+IKzKZLceaMyD2cN6LjDfshgnDMhcnjsNOTc548uWTotng3YnQUNKyuUUlSubm/xwcptpXS7Ld3qRtO/kQpx6YwhikEcQr+mqOK45/54tr8u9GHr+qdRRqt58cThU7+0ePH/KqRyrEEVDb1VLwg1+gs1S4sZVHiFdhKItd9hKouU0MipGyip2kaLpYnKHUnzZPRmITEzUViBJXWwNwoe+/AO1CymR/eZEIPgmznRGjrFs01fSdJ1lpF8RzSkcJqgtcQZa8bB1G4FHWZmsCJyY0YXYgxW4hSjGolXAudC0dRuI1DL4GXwWGtSmNW8U4ivXYDUh3ngjPd75pIt98xdDv67T3b4/f1O45vzNnIMtG8IGSSaN/3mLS4hPQBy9wqV81SwWdJ6JvXbyiMRSt9NCt8eLHk43cRgo5DkDIHwx5BuPnCAlmoLi4uePPmDa9evWK1WgHMqo9xHLm9v+Ps7OxBuFq2Ss/z6qw4caMQYHe7HVVVzT+bXS3ruuY//U//U/7xH/+Rn/3sZ7NpV3b/7LpuHhd9+OGHvHnzhl/+8pe8evVqRjyOHUWP/Tbywp/PTwxOkAQUlTG0VrGqK9q6oLKaUkOhlTDzlQZjMAoMAR0Cyk8UGiqrpQKOQQiYAdwUiF5cI6fo8FqY40GB84EpRgJiLENMNsQp+jtGkQ2erlra2kKcUCoK2UqFpFIQ2LawBlsZdOExxch2PzD67EfgKQpDYcUjYJocYyLa+igddYyBuqqYlMhXp14UGuM4MQw99VRTNQ39vqNsGuZZphamtnMTk9+xXrRslwOnq5bR73GFScWLYvSRIMgoERhcZHQeopIYj+gY9IgxPUpbylIWrGGUrtQUhtJWKM1sFtfULYUtOF2vmAbPZhiwuqQokxV6kFHRerXk17/5RBxDvTi6Amx2e3wIdPu9kIaDkG5dMvoap4n73RZtDaF2UNS0Jyc0ywUhfY7amJSXI5VUjIIexHgY1x3ff5mDEZFZtJ8VIHIP2KJgsRSzJOccU3axNaICyKqWvKZUVYXSonTLiMqxmViMkbos8SFQ1g0K2G93c6NQaoPygdII+jglkml+kbOaJUaGvie49HpRKAN9t8d5yRfXFDIe1MJ/0gSim2iKgkIpSSL2Spj9hVg3L+qK9fIHtHXNX/77v+FXLz6hsIYYxewpqsP5k7HIYW07bqiELzGvfNKhZ6Q2dc/5/A9Dz3LZcna+Yuo0d7dvOV21rFYtfrTSfQaRRO53O7SxlFWJ1Wuur9+SSdnHf87rqsp97P92R/q4eOgccTi+WOg8/DdyU5+/I9dBjOIxkTJ+Mh8txHBIfIWZixUcLNuK9XrFAgPczY94XIDLaEbP/Ju8Xns3oYLDISOHB68R+Uyz3TkIoubGUYrovkMFkaYGDU5D1AqfUBuChEzu9ztBJRY13kh+Dkdk5FypGaNn9MQWRjw9PGkcFx4giPJbx+jIgftBDAQ/0fc7tpt7trvN1/o8v7H0NRcVx0Ffbds+GHnEmMyeErqhYr5cFLe3txRFwdXVW6Zxot93kjD3hWvpIST28EY93mhzsZG/Mrk0b8plKQZdIHKd3/zmN3NxstvtuLu7m7u11XrNycnJnJzadd0M9SqlkjNcQdd1YshFIvWkvI1sKx5j5Ec/+hE3Nzf81V/9Fbe3t2y32zlHZRiGudA5Pz/nyZMnfPbZZ1xdXR2NRQ4E3NzJHHdC+bxI5yr5BnVRUheGxmhao6hLy7JpKKyhiCRJVoJQjUUcAAL4iTBFVJBxQ57J+ZSg6b0nuICLXm4aEB8PBc57vFJig4t83mgxskGDdxFjFMtVw6KtKEpDYSvKShFCLgaTplsdXE7LssQWmqhTT6KgrCylke5147ezRNn5SFAa5yNFYTGUtHXJ9W6LtpayLFgtl0IMDp7lsqWsW25ubvFuwqTxXVmVvL25p9Ca1aLh8uyEbTcxTZ7CpATODIVqec39FNj1YtoVnAS+hdCnzkqg7rKyaG1Sd5/Z9gfCdVmWBO9pmppFbRj3PdELYjjFiIpC1CRE3n3nOW9evaYsSvphP5P7FInYZi3dvmM/7gjIrLjre6ZkiX++PsVcPmIz7Pns6jUfbJ6xLgvGVD2ZmBIr0n01jkfjxCOYPxcXQhqW6zBE+ayPC5K8LuTr1aTP+LgRyFHpKCXmVOGwloQMp4codv9aUVjLfrfHecdisaAqSmIhI5qmrNBKMU1O3o82BCUycFGspG4zxBSOR+KbjMQYUpFvIAZiENt3qzXj0FPYglIpQmo8VPAop2YkpK5L/sX3/5j16oS//Hd/xU9/9ZEYhOnUoedNSefsk+xAKkZJcq8f7u38O4c1UMjx8xrhPX2/w2i4fPaE7e010U94PHVVCCfFOc5OT4U/VlRc397RuZGqrqjGQO8spigw1iZWdtoEs1Hf/8rlhkpFzjGScTRx+Bq/O/8tncucE/Kg8kjXrYwGx8mLz4Yp5Br0TgoGExOqJ8nW4egBYohzsaEzopueK4RDrtbY7VFhwqjAUI7Hr07+F9Pfok77uGMcejabDV23T8/vcW6QYiWN+KYRBh2ZHNzeGjbbDY8vzo7QnlwcqPyE4kaqJYsmEhLZ32OC/H10I+M0iD+TPi40D0WLQtaj4MVOYHN/w/39P0GxQdqU8xvK0jXJCJGHylApREZ32Ei6vmPRLtBKFovoA7ujzuSLBcX8Nr+AamQexhe7+yxRbduWsixn+HQYOvb7HazkBspGWlnNcTx6eef5c27v72aORI5+zzHtOQ4ehLNRleKHkEcty+WSJ0+eoJTiH/7hHxiGgZubG6qqommaGSpeLpc8f/4cpRSv
X7/m+vqa3W5HWYp5TTjqFLOc9svQnmPJbVOWLOuG2mpqrah0pNBQ10UKmZI46qDCDKnpgFSpzhFsxI8DwY2gZcGWXIojBnUIBB1Fl6FI8LgHK4uVSbk5QYvnh1Ly2VujKUs7w7pFaVFanAPLssTaMq0vGq80DBOj6xCoWYvsNEQKIwhDU9f0vYyfJufpBwnIOmlrfHSSOjv0bLcbUdqUNev1CluU3G+3KZkRvJ/mkcE0OWzlWS0WoEtMsWAY4W4/SjFhTYJNfYopl+vO+cCuG6gNmBjE1ydG1DChzUBRlChtKOsGIYYmGLcwc6GsUHjnaeuSplkQXWC/2YMpJCJAwzQMvHzxGf/iP/lP+MmPf8LQif16CLKBT0GM4ayxyTdBkDMVEc7COAp3apro3chVUdLEyOPTFeuyoFydSJGRSGsqpDGJOhpZ/+/dAAEAAElEQVTXpevumKAspOFcGIBS4vhLKihyoa6TIgzFzP/I13MOHswmXF9UNeTvOecoK7lH3CR+K2UhGUpWCV8jm2rl8ZFKhNNxmgjeM3S9kEetRWy5xe/CWNmctFLyWKQCZ5oE0vc+5Y6U1IWdUSNxmZEiO2ghC3//29+hrhu0tvzsVz+nD5N0zkoM7/IG5fMIRz10YZQG86DGOOanPeikNfjg2O83nK2EhDxOA4bAfhhomwZjpNlbLVe0iyV391v6vk8OtI44xZksq9LjJ9Pw3/oc/tc4HhYbc8Xxe37+t/9MJJK51MjMg5wNZY3kvshIYiAog7FxJi0rrSiCuAD74NhuNtRFMz+vFxgTrTPvJh69zpgSmQf6fo/yE9aAdyMcG5TJIpner7znaRzp9jvu727Y7XaAILPjODKNDpARyRQcMUxExEF4v9sxIw/z18MjKPFN8TEwBVET+uCIRqG93CODn3BBTCC/fIiWiMlKlGJumnDTPwFB1OgDWzwzyL33M5cgFx/aWnHfmyasLWe+gdHSmWfuQ13XjDlE6ujDejAeiEcfZvq3eZH+QlFy7MdxkNYddWNO2NjH3XPXdVxcXKCU4h9//o9MaXEsCvFEyEWLc24mr0r+g+Hi/IK7OylO2ralKIoH1uGLxYKyLOd8l6ZpWCwWXF5eMgwDm81mti/Pi07u9jLB6Iu5C7PUNSEeZVnSVDWLuqatKooYqI1mWWhqHajKUvgT6VQFnyv83K16YnCCXEwis1K6RBsIkxc1gguESargqOKMXkze46MQhoqiQFtL1DqVJxGrFcZq2kXFYtFiClngffB4xCFT5/l86owdim5wgE6wn2V0AaYxzbvjHNI3TAGfbpKb2ztO2pbJec7Xa8nuSefKWo2e1FygBR/pBonVFt7EKGKxENL1MvH08SV39ztKo1i2Dd0Eu8ExuDS+CKRwt0DXO7DQFAK7hyB5LsM4YDsrqI2xVFVJsGp2ZS2skPaMgvOzM05PTvEuorzGKsvN/R0RRVHWQKQqSv7mr/8afGC9WEr+zTixqOs5KNClcMEqBQgG5yTQLqEC+6Fj8/mOs8WCJir++j/8LXaaUN/9Huf1gpNmQVtIiFwMHpWQCNTxHFr8VfL1mN+PUpq+HzBWDK5c4i5F4oyUxBiEZJxQKR/CnF6cSaMPvvJMPkHgk3OYREhvi4IqRdkH5zFzM8S8lmit5w7fJWJpVZZYk+zqQhBuEE42JB1BQ5SqMfGvJk7Wa+qmYRwGYpDn0ogzo01FmjLFTHz+zrvvw/9O0TQVf/eLn8DUP0jnzQhjRqXivEHkbJC0HqbzLlHnh0IK5DY+OVnj3MRut6EoDEO3o1DgxpF+v2e1XGGNOI3e3tzQNA2r5ZJhGiiLgqpUycvEYTiMdEQM8E9cbPzOhz/aA37Pw/x2oZF+P61zM8FRZRfOSJkK8kjEeZGCo0BjEVts4XdooxjHwN2dQ6kdMZYoBO3WMaCUzS9CUo+zN4dSiSXCEVr1u96rqMD6fcd+t+H2+pq7u3tQKnlueIkmMGYeKwppVhRUXbefR56/88RqKfxNUiPqqCmscJZQKQfp95xxMxdsGmtUSsb9/cc3KjZy2FUIIVVdzIVEURSH+aOxqKBRSux7lVFiyNRJkbHfd8QoEKWQwCTtMoaQZFjhUGlzBOLELAs7fKDHDp15scqFkOSuJPIhuWBRPHv2nPvNhtVySVXtef1aTL6KssTagjrln1grccPGGqpKiKbDMEgx0w20dTNDxEMKKMs+GScnJ7OMNxdG5+fnXF9fs9/v2W638+gnu6ceIy0xxjTqUA8KHqLkX2gFVVlRNyWLxYKmKKmtpdSRCs+yKjlrKyoVKFXEqphISRodPDGOqOTOGFwgWiMGd9FI2iVaMkmcw7n02SCZIVKsWUavcBi0LkAhsfBa1JVWG6wBFRR1U7FaNTRtSVMZlHZCxNOGuqqTT78UDtMsnxRosdBGiE8hJP8QT6kUTVUxTntiFMh8F2HbjyjvGb3mbtdj65qqbcAqPI7gJqrKMPnA7k5yfPr9huiCBHnpQFtV3G87fL9hUSou1g2b7Y5FbdjWJZuUCBwT/BmVEPUGB0ZDmQx6ghI3ynEahVC43wuqYw3eRXwWvMVAXRouLy9ZLpe8eXNF2VoWquHt5pqYoH7hznjur++4ODlj1+05Xa8ZxoG7zZZSKYq2YZqKNMrxmKLAK0Vp9Nwh26qgbGqasiJ0HZ9cvSb8/cR+HPjOs2d8+9FzLlfQKFl0xU1SE5XCx4Aj4JR8PlMMjNEL50OJx0SIAT/6GdUKQXwwokJ4JiEpykJAW7m+jdZigx/yJirXqiAGRvxetHi8TMOItZYmEaqzxXcI4jiUiZgqCtHQBUkoduMxfysVTnmepcSuWyUEIfiAwaQRjBRHdd2InLfrKKqStqyl+w+pRDAyJmuMRnlHCJ4/ev6chf3XDMPAT375EUMYUcrisleIkuJUJTt8Hz2gk0xSChAVIyrIedBA9JFAMk4D2qZFa8V+v+OdJ4/Z+5Ew9oKIBcd+u2F7f8fJyRnrkzPqOmLjxM31WzqrWNWWrR/B9VRK0jpiUt+kANyvXXP8DgDit45cFghjS/gFKnFEpLjVCBVRc+zB82Uoz1c+eBS0lHn7lDGUtZbCGCCgokMHD35EaxmNGTdJ0Rk0XkXGITJGjxsj8B7A3HDLnmcgiAIpN+TZrMsUGrw4xObrG8TaPZu+gQcdUHjCODLte8auZ3u/SUohZISSyMs6pjFIqmSGwogJYG6so6zfkqCspQGKyNqfgg5tVVEG4WJorVDRo8oKFzVRCa6hj9GaGOV8KYWyBlPWKFujyyWq+Hof/DdDNlJeSU4szTD/sTw1xsg0TESlMapIrHkSVJwQEeXIyaQZflIgVrkxoBNhJ+YLJ72XA1FNz+jCMfrhvZ+9LYCZkDlNiZijFLYoKauacHfP9Y0ErSltaOpmfo7ddo93gaq0NCn/IfiMKChJOtWal59/LjrtxG7PviOZ75FVJk3TcHd3N49LgDm6PueiHNsQpxeLSkxgdzTfNlqxaGoKa6hK0YMv25qqKLBEVlVBGUZaqzhZlPj9DhuiQPxR/BNiFIhZG7m9Rxf
wXiHXs2IaIyEF2PkQJFjPpIj25I8YlGY7TDhVoKLBBDEQK2mIRvgWWk346NAqYHTEuxGnNUqJUqdIkPjkpmQQpimKmqKydMOINQVWWbT3tMZCBKs1XnkWVcVmsxPGNRIT/fbmjvWi5X4/0Dkoi4poFIObQIO1EozU3e0xJrBsK/ATTVniA6zrku1+T1torPGcn9Tcbmo+/XzEKE9bW6zRTAH8JBsiMmoVZ9Epoop8V4nD6eid8Dl8tvqWnAMVBOYtCyuEyRjY7La44FidLNBlxLwhLbRR5HMBVu0KPznqooToMSpSW0MMyfQ6BiprgCTtPPKu0MZQ1iW2krHFGAt0Zbmaev76o5+yGToiEkT2dLlmVTWMY5TIjFRARa2YXEAnaWc0WvwsQiA4j7XFYS1wh9RalFivD4NYlxulMFFGKD4RXoMXhEtpLeZbR4iKSq2HT/dOYa04mk4TZSrmM+chIwNGSQhbNgGLIeDTOfLBzxtViBFti2T+pyDI+M4qw+A9ZVnhY6Tbd+iyZLVcof3BuVch83AdZZRmrZiKTTHwwcUl/8Wf/2vGzvHx68/YhhFVGKJK3LYYxK0zBXMBGLR8mCSAI3ndzOhSTCRzpYguoJShH0Ygslwu6LeBfhzYbbe0Vc1isWS/vadtW5p6QXV2hpp6bt6+RU2OwkwYP1KoSJ9I5kYXuDhJQ/I1jjiPPb6atHlY2aSgzCyHA7dCEaMU8AqDeKboVHhEvjg6/8oHR6YSRHW0fSTSuxdzPq0i0Y8EPxKmATcOxEKiN6ZxQBFxQnGgD57gJianycUGQHanlteaLNB1tuV3eO/opxG8yEm7cQRkn/FRsnMMMaFpAW2gNJZSF1S2JrjIOEgDGrwnThNTJ6ZeKOGp6aJgGoa5OBfjrnSOtEZj06cS0NpQFCVV09CuTjB1i7ZSdJkYKZqaKSoiNhWaUZC6eYQvQ/WgNKqowTYU7Slm+idANvIIJMewP0ixS2jCAzlSIvZkwtcxzyBvtsfR8PliUSqZPcU4k0uPnyePEmTeb2d49/jPw+KjZolkJnv+4z/+4yzbq+r6ASqjtaaqqpn0mp9rSvB0fn8xRgm7Qdj2x54XWus5Hj6HyN3f38sJT8TVYzMjpR4alR3GPo4Y/YGgWkphs2gbmqqS3JOioCwKysKggyhI6rqmLQ1+kgtTHaFEgi6KfTxRoqQztD05xzCMCY4W6M57j08VMXnxR8KDbu53OGXx2hJDCQGMjZS1xVhRorRtw9npCWVZ0Hc7hj5QlYbVaplm9bC0BYuVIaIZJuiHiV3Xi9VyJ0hCWZR459FGfPesMbRNzeSBGBm6Htd3GKLE25cNKkonqXTAFAXLRcvd3S3OSRBXWVZUtkYrg4uR5aLl+vaGxeoUU1gK63n+9Amfv73jrruiqS2rRcN4uyPlnuZKmphqj0G4hxidNr7JY0ZHUdZEFCFKVkl0cYZ1r29u2e52lMnRdbFsmJwYLhEtRIvCEIJOsjYgitNiUVgWyXwuXT1AmEecSj3M09CFxEwXdSo4BjGpu9rc0/3kH9i8vebPvv09+OC7hDOojQUXDwWxUihG/CRjh4zaDalgrJSd74+MOObiP+f8mCMehU7FBYD4HwlaSZTRi0JBIAWdyei1qZv5eYu0jiilcJNju90SnX+4DvHbo9n5fKQuVGTCMhPH6OTlIYhMU9VzEbRcryVzZpgYe3GFVSR1TiZ0RvEVUUo63T/64ENCUfD/+/f/I3/zj/9AYSw+JLLdEX4rTJG81h424vm9zAsx6fxG9v2AtQVN27Dd9axqC1EdeF9EbGnpOyk+yqqiroo0Zkt+IukliEGbJ+gsof+qneAPexwXBIdi5cjgKz4sYr7OA+ZG9UsNUNPDxRhwbpJ1acz5IEeuzMFhyGaDnkU42i7jgcwLieR7pDCKuVlGUC+rZeQ2/4ZWiUuXfkoJelW3C1YnZyy2e5pmQcAktUzPGEdpOpKfBxqU0YL4TVOauhnU8VUVj16jMVR1zdnpGbpqcCGgrRR1uAmrJOjUGHMk0T56y+rwWdiioG4blqv1HKr3+45vrEaJEabJobWl7yYopQMOPqZI7dQpcNg0v4h8HGRyqW4+4oCQK9dcKauHl5k8xmF8coysADPikTf/YRjkg0GIYPv9HmPMXKjUdT0bZZ2dnXFycsLV1dWc2TBNE0VRzLLa/JU5FXmue1xstW1LCCLzvb6+pk4z9ezBMQzD/DrzOfkiJ8U5j0sVsVJSWC0XLW1To5U4nCrE0dU7T+9GGiv6f6LB6gIdZfRAGqHMhKm88Ib82mWhGcaRfT9gtGyKIYL0XSAQGgJVK4WPml3nue97ukmxbmvWbYmxE6XuKJXBaMdy1XCyXqJ1xHkJDaqbevZUkAAqCEqhTYlSBUVZslqtMErzeniNSgWaRzpfj3TpVVUQRy/eFsYwTo67uzuausQqxWq1wBYFeI2xAR8EmTLGAhqb/BhiEGJWThaWEVVJ7SKOwDvPnvL6ZkfsPKu25vZuI+f0yIM6qoiXRkDyPxPSoLRHDQNlVVFkIrUSqNyHyDiNhJQxU5YlEdjtOvb7ntPTM9wUubvdJYg0qxrkeY0xGFtQWGbpd9d1s53+vCGnazfGSFGVKG2FLBYi4ygKpMpatruen3/yCTiBm3v/Pu+dX9AYI9Htk5Ai66qSpMc8JsmNRzwQx/N9n4ugrK7K9900TbMVuuRPJOIkmpDyKzIiopXImzNHqa7rtFhr2roWldHo6MJeyL7DkDaD7Cos130ei6hcHaZlWSsltufDSNQaF+WzzIszGmxZsjo9oWpqXDcSx2ke/aIUZUZaktHdMZm90ppvPX3K9z/8Dh+//A2bYU9ymE5FRS6G4tzdS2N6cFM9Xh/mNVVFttsdwYO1FV23pTYtMcL93YZF29D3A5vNlrpuQIlX0OZ2R9fvqOsK7TyjG5mcw3rhHkWEpxLzPf+/xREPhcY3Lja+4jgm2UuDJ4ZvXbdnt+uST4xPahQl7pyKWYqtODL1yp0b+fOID0b5dS2k9LZtMFphjab17eHX9cP3o5S4vdbNAl0UmKpmcXKGN1t2+x1jVGBKmkVJVVqqpB6KWnhvIUhTHlLzkytI4QTFdD2JOGO5WmKaFh/jnFIdxgE/dswE3a843cYYkXsXsnbWdc3kvl5V+o2KjWEY8U5mUzbNgYOPqZLO5aJIspR62EEcIwdwQAO+yMnInIA8u1Mc32jMN/AXpaG5wDiW35o0R52tzlGHOa8SG3WVNu6yLJmmic1mIyOi1MnkgiCjC7loKMsS7yRuPS+wmXTqnOP6WmzPsytqzqrIi29GZLKK5riQOUY9pNKUtL+2bXhyeUkIXmLs9yJ97HZb3DSgTENpSyEuEWmqCucmdDzqHY46vGw6E5E59b4buN9uMUrhQtL4a03wUbxdxEYDj8KjCFhut3u2wy37tmY6WVI1DaZQUJQUleZk1VJYnTZJKfimyTEVDmtkrum9Zwoi11IWAilvJt34hTGCZljLvu+Rzl1RFpJl0PfirTLGyHa7Zb9eoUKgbiqGKSb4sMK7gaKsqe
sJl3wWpmkUMph3yS+llM7BKqrCsO1GLs9OuDhbM/hbVk1BU2im4HCpgMsLT5ivdSnUiBAGT2REq40Ei4Ug105pU/dVUNiCdrFkfbKCCJPzeB9ZLVu6OBKJlJUQWTNqgRLJZVFYdGmOOhhmLlBG2jJ5FKCqa4LS3G82DMNI1w+i19eKQsOkDL98/YoxBGzT0hYFT5YrsRuH2SsgoxPBiblbiAGV5MDHiAIwF9j5tWU0zxozNxcxSLCYD35WkORwtlzMeyJ2com4J74gwQemcUL55NCYQtaCF5nrzOFIr1mlzJZ540noqYoyBim0dHWRSNXUIps3hvXpCeeXlwzjwO5+iw0BHcFNssEUNvGWUiy8NFHCw4jRUwCPz884W624292hrRbI+2j0kFNl832a18o5G4bDhpmRjfv7LXebnYxaxp4w9FgCtqp59PgJb9+8Fo4cKSgvBOrCUDUVtiwwhSJGTT/0GDehS5WMyNR8Df8B9vmvfTzcD8J8z8xzpT/gEWNkHCd22w3391vZk5A1SmuVMkCYM02q4wY+HhWIaSPPKFlZlpyenGJsiQ9ptG8061tP9umYidfp0PnzNoZ+dIw+UjYLajR7F4iDQylLtVxSVyVluvfB03d7KRxyI5m+tDpcUyo1F8YYlosljbFzjo9SMHZ7trdZqu4FUf19h0on4sH4/6uPb1Rs7Ha7mYsgiwYPSJn58D6AOqAXcNjkju25v1hskF57SEmgzL979M8xk0QPN2GWzuWNOj/PDCF/geGUYd1sI543+fv7+3kzyMTN/O82cTPy36dpSlavh1FINui6u7t74D2y2WwenJ+82OVzcVxoHMO7VVXRNsKatsZQpPNVlSXL5ZJutxEo22iiiwQ3UmiRAYZpxNYlQanZj0UhAW0+ncNjRCgQxemSSGEswXtcmp9GXNLckxOfhZNTlEwRxjEQ44hRPetVR1tHTKtYLVY8ujihri0iiSwpCkPTiNOqLSyKSFEGyqiYpsBm37Pdd4wuMKRMnKZa4J2jqivud1uMKbBWUwbZrIbBEqLHuYkQZAHWWmNuNpyfn1KVFuUVVSU5KNpaKluw3+25ubkRUnDTSEEVFTE4ySMYHUYFTtct7zw5Z9d1WFvQ7VeE6y13+yk1yGnWnxQMIQeMR5FWMnmsGbGDSIQ9EigmvyqZIPu+x1jJY6iKguAjZVlRlo10LUExTWPahEy6VjzegzKkDBMxdysLc5CTzsoF6VgKaxmchDplx89xGonGilmQ0XgFH1+9If6Hv8VtNrjn73J5eUm9aNHjyH6/R5N4EDHiXHLI5NCJ5+s8b5jHm2Qu3GO6/nSSSscQZPZsEsEzrR8qLQLBeaZxxE2TGMYh64ifJKly6HtUiFglqoDpCGnJhf8xMjDL+JObo0RvFwgfUbFcLrl49AhlNKsT8c+Z3CQpvJPDOUE3rNJShCmFTrJKr8B5IafGCKPr2dzcMHUdpTYJ4o+HBjltFDGhwsdr5sO17zAiDjGw2/ds7vfgHGfrJT5MLBcLgpsYx4mmXWLsOJsNKq0odI0ymqJuWGmLY42vW9CiIpLnCILc8L/NIa/jGNn4Az4ucj/k/aLvh4PhYrqmvZefVOpQbBzXOw9fUbZjEDfdtm15/OQJlxFRoZUlRkP967ccio2jzzZVdZHIMInibTdMDCFS1AtWJ0La9M7RnJyybBsWTUVdWrwbuH37FkxJTCjejGikpl0DnlTQGFGhFEWJCyKDVQpI60WM8VCwfMmRUbjMTzFGEoO/zvGNxyjZ9U+UE4npGw+JhbnKO/bjOEY3jr/mzTVBvVprhklMfYw1hFxZHl1sX7zuDh26f4CezCOKIB2OnKlDN5WLpKwgyYtklqpm5CXGONuQ73a7g3NijGh1yIkB5ujtbHS23Yqm/Vi+OsdDZ/+Bow0hxjhzWaqqxGjNYrGgripZUIlstxtWqyXTOCT5IExjLwCfm9AxUGiLionoFA4dUd5wFPkzeJhCODon6MZmJyMKDM47ctBPRg3z2Exbi/DWNL3zjD4yjD37/chyoVi2lxSFJp1ylIpUdUnd1Mm6W9Qa1hhciPTDiFKGtl0Q9x2xLKkqUT8Vhbg6+hgwWpw6rVUM40TbFOz3snn6ENnuJLn31dU1dbuUrjwIA3zf9QQvuQNaa7bbDcvFEmpxsMQHhqFns90RgsKUC4iey9MVb9ct4XbH4/M13eSZPOx7cdnEFMISJdkVE2TMohG4E5F5+pDGRiBjFDeJLXaS8NZ1SWVL2rpBKU3TSKbKZrNltV7gpkDX7dFaCdLoNVhZSIw2YC0OJWhQ9pxwjn7fJYlpn1xGHdPQYzRiTmWs2If7gGladm7ioxefYnY7Qrfnh4Xm+ZNn1LYhENG9ZnSTjEyDBKTlsUrwDzkTxwVILuAz6Q2YRzG5M/TeHzEZ5L4liIJDoUTplEYxVVkSnGcc+oRoOKZpTDbTRzyHhHRaa+WcJHVTfr1Tup9D4k6UVUlZVSzXguqgoNvvmfoBNUlCpp8mKVKMhG6JGvogFRb0xzM5x+3NWz795Dfs91s559YQxvHQCCm5j1AHv8x8/o75Z188+m7k/n5HacTr1PmINgXj6Hj95orCSkEv0uOC/W6H1SIhDjEStWF9csG0PGXSVgrllNUyd8pf+/j6P/tVPxljlHI9/UBeu2M8eI78ToJofnyVR8aCvJP4L3I76jkpFQXWFpIFFKGuK2JM11NCuEwM2KKg1BV0h9efuYVwKB4UirKsOC8rlC3ASoqz1Yp4c3AQDalIzWMOEpI8Oc/oHJMTVljdtKiiZPTJfbtuWZ6cJt6epd/dY7dbIqIC9JGDNDbGdD2lBjZxiBSy9hxG/3ksnawdnCMWRWowD+9PI1w9kQ+X1JWMUdw/xRhFkYmD7ksJjYf54sHOeP7do44ib7Jf7IIOXw8NRR7O2g4Fx/FjZu+N/HryYx8jJwrmNMpcoOTHyAsdHFCR/HVckGQYWIobCS867tgyjyRvEjPZ8wuy3AxtZ4QHmL09mqahqSumaeDs9JS2qem7jslNuORH0O13ovmPkUIlN0MCbujQ1ZLCGNw4YpK6RxCvDE/Mn9y8MKr0Pibv2XU9KPG58GisYh7NZJpCRG5MjMj/XIz048R2e8/FuuX0pOXkpEXhGIcBZSJF2cyZJ7YoJDnUO7TRUqV72N3vGSePj+INARFjBFXqh0FGBGlm6KaJGEYqWzIZhdXyO6P3MHn2+47y8zeEJ484O10yTIIETD7gky12XYsVMUozjAPTlEK+jKWqGpRyjP2OpoCnF6dy/RIY3Zqun+i7SVQawYMWIR9HM+YZxSpKTFGgjRXL4RAYvUN5kSKjIE6jLH665OTkjKZpOT09SbyiPSFo7m5v6IeRk/UJm81AXTf4cUJZi4tOjKJSkR2J+EnMhYL3Yl7UB7b7jSgsJglKM9rQ1CWjUrhhZLvbEkPEtC13ruNnn/2aUUUG73l2+UgCrMqDosMqjcv3f9QzJ+t4fThuGA5kbglSi2nROCAYGfkQIyJiJmTLfeMmhzJaAiBtkQyGgriTaoND/AjyOCSPS
4zShMSVWi2Xc/Lz3c01NpGV0YqqqSVFuG3Y9500AIAfRsLoYJog3cca2XCky9NJaXIwxPLesd3d85vPfs2LV5+JcZmJQOJ2zNdIGp8nJ9djy/bjtS6fz7wW7nYdm11HYcTYzzKyubnmfN3Sdx3ee/r9jn6Ua6a0hheffU7dtnhTsR0GVpcNwTZMyjJLTfOL+7rH/ONf75diPF6Zj74/fzet7VlbTPyt6+irCrDj1zAXHEgxF0Mgj+OtsZLou1wRlIgImrrGu5GAok7+R0ZJE7qIxVxsyOORbvcj1FqrtMYV4oxd1GhTUGjwxYHz4b+whyUYR5AIo7GlBa8wZUFhLLaqUeMExoItCFryl5QpsWVNQIjjKqOER+dIHvvgYJtjRCKkYulh7tj8fuIXCt70njPnsaprqmr4p+FsZN38Mdyf39DxyOT43/MLnd/4lxx5/juTRDno5r/4u1+FqOWO6VidYtJCNn+ePJx/ftVx3E3km134E8WMSsic1sxKlmxK9P/n7c9+ZcmydU/oNztrvFvN7qLJzDgnT191qyiuqoQACRUg8YR4QeIPRQIJIVBdVA8IURQcQZ17T5NdREbE7lbjjXWz42FMM/e1YmdkBtwqU8Tea6/Gl7u52ZxjfONrQFj3KaXFuEgVJMVa+8S46PI1W2vFermu2W434k56grauqJwjBk+KgSl4xr7DFtLR6XQCo6lUwlpNGAeSrzCmIYWAmccyqGcFXMaUziWp89dCTPTjiNaGpqlRRvzzVYHbhBcpHI66rrDG4GPEKU2KnmmMtPWGX/zsc26uNiQCMXkgUbmzu6VS4CqH0WsOhz0xT8Qk575dtZz6sZwfeV7ee4ZxwFYWbTV1UzGOA2R53c7JOdRRLIbH7Ol6z/fv7jDO4Zxlt2uwVU3wE9ZVHB4fqJwgSKP36KoipUDfd2y3O6rK0A8jw+BJ2vHqxRZtlbihmoqHfcdh38soKaXF7rmc4eW6lY1f5G7TrBAw+uwqaQWOlOAkS9O01HWLNY66bqgqx+3tNT5IxssqtjTNio8fPxaSJUBanHGNMYsJ3ex665zjcOiwzpCjZ+gnDCKTXbUryfGYPErLgrvbXfH65QuunaN7eODffftbklEoo/ni5WvqupYCuzgG22QYffxBk7Dcyxf/nrlKItsxgsoshGtIepa0y7Vm5g3dWdq2XQqHxlViWmasQMQolLWoJM6iC2FPa8QKIZUFXSSQRmmaqmaoG1xlyTnSrtd88eUX3Ly4JeXM/f09KUQ0It0Nwwg+YgpS4i5MnFJ5jQlBTsZROFBff/cN//jrf+L9wweiyQQCJIXVStQFl8VGuRfDxWj1ckO9XE9TlgDCrhu5vb6iHybicGDbVjzuD7S15fHxEWstL168RAHb9YbROh72B4bs6dWaJirQFRmLzJCkiEqfKAb+2z7ml3p5Hf37dDI98ywUrnKsVms2VwlbC8LYFKNHn4QIfY6ar6jPbuPMSplzISSfF+NKkSgJmllwzhBJ4bIBf1YkKeGsOWdYr1uudjtGn9BVjUmw2mzE2dNVoARFS4jhX92sSohcRmshoF8+0/m3zWPEFCPDFGSUq6QgTn5i6ntaZ5+Yz/3g/OliCOYcVeUKHzH+we+/PH6aXXlWUNzy5ktgJnXmLJ0IQP7TCp3leJKcp88b8Txp5hNIyPNj5kBcqkasteJQei7zPtkpAE+Ko1npMj+3ywJoRlFWq5WE7KizzGw+LnMj5p+/NIGZn8P8fJumoSoKjHkx0UqCc6RoGcVrYxqZ5Vpj35XiJTBMgc2qFgdRItMwEIymllO3dF+kuaNAorjn31WgNbIixECaAsZ4jKsKT0Q04ao07DIdSDirqYzGq4BYFCTa1vH65TWvXlxhnRiqaavAGBmhNGLfPVfVc6HV1g1KOULaM/nSMVtLMLqYmQkXKIkLkhhnxcCqqWlWLTnD6BMha7rBM46BmEQ18/B4IufE9fVfAIaqaqic4rh/JMXAMHTEJK6CWs824lp8MsaOMAWSDrR1zfW6RStF1WzIyjGOE9++P4rDI4jz4pzSVjqGmBLDOOKDp+t7rDU0lRSR0zRJN+QEatVWuBPjMEmoYRIZ4tX1tpi/jXz8cMfHjx9YrVfcXN3w8PF+ua5SUYX4aVqKjhQjpq6lENFgVKI/dTRWoPXXL1/StitZNHJmt71is91KBsnxkcHAdDzx3/z6nxdfiS9fvqaqa3wIxJwlZj4m8R8p1/ZzWfpzEvd8Xao5l6V0ZbaMJFJOYkZU7tW5o4ozimHVYktOOfdGi739XLzMyOFceMzNxjAMS7ZT0zZoo/BRsdpteP2Ln7Pebtl//FjUXkE4MT6ivWQRKa0xzpYisUQuFGJ3VopI4nA88vbdW/7tr/6Jbz++ZcyB5DSTD2LvL3MbWXfm1U4pni1NP1jnzo1QJoREP3jqul2g9M12h8me7vTIdndFTpFxmri9viGFzNXuhg/3e/rJo9YN2jRkXZGVIxVfizMm/N+R/vX8Ci+6w0vp67+HR76AxY0uBURTs9ooTC3eKpV1YA4Mk6ctI+y6dtKU9BdBanORwfwcy3tTxhLTGMg+MRGIKAwZO4w/fK3P/uWsZrVq2U2BfgpkUzO7G+WcaVYtxjmscxjrUEZTNS1oS0iFeDBDLs+OmcRqjMGQiOFiH9I1JoelGfxDx+U+acpaqZ40WX/4+GnFBmUxvYBdBJZJCx9A5kRLmfDkCf5Jjz934mXBuUQBfuyYF6R5o5/VH1yMYDLnkctlMXHZLVxCwPNGOHdic1rrbODlp7GMdfIiZb18nvPiep6NnefYc45LU6R7SqmSVyDppaZkXKQkccJzgmBTwuCOh33xXEhYBTlGtMpU1sncepxYtw06J4wymFJtpyTfq55AuOpizCIQf4gSGmbLRqBy6cJyZl6K2qbietswThOUJNfPP3/Nn//ZL7AGpuHEFAdBA2wt0ddayHtCqBImvNaavusZ/ZGuG5h8knCvEJjGqaTPilFRtNA0VxijqWvLur2iqhqaukHpmqxO+NQJu95WKGXpupG+O/LVz75gGEY2qwqrZrKybIRNU3PoTuSUcE5stVMMNJXM4yOOFMSD5Ga7xtWKpt3yeDjwcPgVUx9RqtzsGVCpkFvktIqUWRYrrWB0YhddWYt1jrqpsVZGa33fc7AH3rx5jXOWcZKU26470DQVNzfXeB9p2w1jPzIOAzFEjBH0LcaCbvhJTK20LpHVinHscDaz266oXYWfPF/97HMqWzEOHucqGc3kzP5w4OP+gTF7lFO83z+Q/vnfkUMk/dXf8urmBjejdUA0gRTO3h4zyfrynp7vJ621mBppLaGNaVallKIEyAi5e+60Vk2LsxY9k5qTFD5ai9NsSiLNne/P+Xtmrtn879n4byl6rJBiTWXFcl9JCuyHu4/EyYMP6JhwyuBsVbgOxRtorivJaGuIZEY/8Xg68vvvv+Off/Uv/Pr3v2NIHhpLUFnUHuV16Txv62qBrXP8NCp8uVaduW/I+xXA1o43P/8F/f6OttYY48gI6juFQHfquVrv6E8j280NOmj2
qsG6Fm8c4udpIM826vG/c2xjec3LyPeMGvz/e+Qyprjk9RnjcJU4hRprpAj0HusatKvRrpHxp3PoC/OqpTnOTx9XmlVpLmL2TLoiZI3VmdVF6mtGLS/xfAhXpaksbVvLWNbUZGMXN+m2bWjqmrapaJy4BU99j7VVGXXkJ9OpsivLo6eIsZbVeoXD4GMoRUxCxcBARJcGJaYkCdCfOIeXj3/ZPP+x4ydHzF+SMeeu/ZLfIGxqVRzx5lHUxUzrsmwvxXOGpSJMSXTrqphOcfHYf2weeIkuKCVSr8tiYz4uLW8/dbIuTZDmIuOSODp3SUbLkijEJVOKGEFkZrbup0ZMdV2x2+2oayGeXl3tCickUrmWEGLZBBNDP9J1xfc+eDSZ25trxqFnGntJLTXyWk+njna3KQFdhRujikXtcv4yKT3lE+TlPEuXmZOwlyMskqqS+rG8BzpD4xw32zWn0wmtErtNw89/8QUvXt4SkieksLx/VVVRlRyL+T2ZponKOungGYhZ8eL2lqQMXT/wIX0kn0SKN02Rh/2BqDNXu52Q+JwYnFkrRNKQDFOAfgx03UTOim4YCmt74v3dPaSRq+2GoT/iQ8Iq4VkYa2Gc0AZcKdgwms1mTcaQlOP+8UhOicrVVDqz2zT83V/+kkM38Pf/+A19KOcnP71JU4LJJ2wlWShWS/eaMkvkutJG1AAKQgyc+o6+76hqQ4gTcxJlTomqclztNjjXyCiotkw5sFqLn8vxcKSqBPUySiSw4+Sp1i1DH6gqqKzj9etX5JB58+ol+8c9WmeaupJOeBxIMeCamomEM44cFQ/7jn/41T9RG4f9q7/mZncl/KMQxNStLFbSZZ/N4ubFeC7ItdaCkJlzZyTNokhoZ3kvzIF/0kAkZFOIUdCGylmqggaF2aAqS6GqS6GcYiQWczHhVkkAn3UOpeTnxmmiXa9IZE6F5D10PWny6JiplJFRTE5kLaqTXEYNKs+Ni2EKE/eHB7778J7f/P53/PPvfs1p7FHrqhQZ4jCaUy5mTSyvXdDhvBBnn4yUP3HknFHaMI6CmKXQcbUWQ8LucE/tCvKDkFiPxz3ZB5yt2R861PqGerUFXWxvLxCN/ITt9t/GkZf/zxwDMTojn1f6rMS59k9+zCzQq3q2V8j5Lai2Pifvyq+TjT/lzGy1lrUBJTb9ha32ZDyRn3yUmL3HZZmVDVzcamWTTzmXRr0cl/Hv+bweOyejiZXP4tHjGqI2JIQ72DQNTV1J0VHV5Oip6hpbVYJ0nM/c+RkWkmhWRY3inKDYKZKjRITEIL5SlRbw4FPvvWgE8jLyk9vzqdDgx46fiGyUE6vShUKhSNUUoFKRDwJZ5u1yw6gygyy2y2bW2ptCFlaFaHM2sS2tNsbYYjeusNaJP8IFTHqJfMxBOCnKm6u1XngmyKM+Hdnw6XnyZZT7/Pc8D79EPQBiFLtaYwrrWwlb1zoryZSzna3KuNpgs+azN6959eoV4zgR/CSW6McDMUYe7z6yWq0JJMZB2EhGw+koCoRhhIeHeypnmAS4IaVMFyYqY+h8ot20pOjxSSrVRMJoKzbKZVaeY56vnjK6khwC29TEYWDwEy4FdFYEHWURULbotg02Kqrg+Xy7otUvUJXh+mbHX/3tL3EbR1YRrStIAeMsq9WWulmjtSOmyBSEdLuPUm1PoxgL5Txw6kb60TNME1NKdKMnJs1DH2hqS/CgssWoiuurW8a+J2XYriqOB8X1tuZ0fCSFjGssH+7u2O22fPfhEXJge/0SkhNff6IUxsaw3la0TU136uhPPcMgRWXdblFGYNdu6pjGUey2w8ib25b/5D/4C05dxz99/ZExZlCRGdvTRhXVUoXKihjnXJWMTgmlI/vTCVM5Qk6EXLFxDafhyPu797Qb2fzlmjM0DjyBq3VNP5yI/pFXr24RF/xEmCYUg3Q6BLbFu+N4FBfE2kmWxmq1oq4afvbLn7PZ7nh83OPqit31jtVqza9+82uapmKz2zGOI3cf7witBm157Ef+69/+C6my/O1Xv+SqWmGzoTaOjkDVNEzB00/iK5ONyHurtl3up8U5V0leTGVrQpRN33uPKgUJnIuNkD0VFUZXKK/ZWEdtLKu6xrQrDqcDp67DZ0+lNI45DiDRWE3IEecMXX/COI33QYIKneFqd42xGkMu1vsZhgEVZgM/BU5cHrWRYoOQ8ONYFA5iOX93fOD3d+/4f/7LP/CPv/s19/T0lSaRSElKdsrGUzkL+kxgVaQyRTgHSColIWwzoTKlc3dsjGYcPTFCN4iR2ak/8ue/+IzXr3d8+P5balvx4e1bSIntpsWagX4aCcbh0VTra0ZsWZMzSgUxqFMUt9hP7QKf2IhQpEU/+OQLP9j4gSXfA1Xyd2Q1hij8GJNFHj6RCEaXcZpBKXMeWZQ9oizU8vNlXCwVS0Ge8mx2VXr8Qn61WuOMxqiIKWNrpxW1szT1ORwzo5hCwOnzdqmtuKQYpSAGrBb7fqUiVePQKRBGT4ienDVZRYw9jye0VqQcyzVe8m6MQWsjzVOrCdqjXU1SBrSmHweUUbimEjTQKaYY0bXF2qIyTAlVEmRTeUdCKXwkgE0Ru0A/jHR9z+gn4X1Fz9ifsJRxbJZG01C8Y+TsMueorBpHZRSOhM4X46UfOX5ixPxZRnNmAj+F/OaxWFlvl3wDjbgSquIONfutC0KizrDS/PmlWLrovn/sqZVFbEYe5u7pOQZ3KcOd4bRPfe0S+r38/qdkrcJ4zyKvFehRXqP3gZxAIXBdu6qpGknL+/kvfs7Vbse7t+/oirRUI2mokvcwXvBA0kUOjHRowzBQObsgF9FPS9qqGLUoYhZb7MqY5VbOnEc4isw4FjOm4ksgcm8lia7FrGlla8I0FCtpQ85ys2ckdEqvG1YrQ7tb8/lXX3J9u8WnPdZIvoNWjqZuxT5c2SUsyBQOhy/qJrQS8mKCzbrFuopmtWEMmcd+ZJwC+1OPpmUcI6dTjyaxf9wv3AdnLddXG6qm5uPdPf00EMloKwTQQ9cTw8TDvmO7qlGuxmmFMxBzXBQy0jHKyHAcJ7T1EKDrJIPA1RXGKlwI9CHz4mrFV1++4f3DiUM/EdDkfLHEJtHthzD3cBK8pbVspMpomtWIdpaKevEyOQ0dH+/vaWuHQnISVo1jt11z9+E9bx/vuL3acPviCq0Tx+MRrQLrtRDbjGqorGMcRyqrqKoW50TV8/LFSz7//HNev3rDNHmqtqWua+pVQ8hiC29rx83ulpxgu97y69/8BuMspxg5hIl/+M2vUCnzH/3yr9m6BuM9la7wMaBj4SoJtMDcq8YCOc+z3gT4GEUKXDgVl/4z81gzIaNFYyTAzAIOMCmjQyTrRPBeCqqmYtWuSKMQamOKhJRQRsaAs6y+XcsYc7VaUbdCFPbDxHA4ymY4BQnoM4aqklRjWZ/kd0UvPhvGOWkUjke+fvsd//zt7/in3/+W7w53RKuJVpG0LNQmC/0y63wOXMtzK3/OdlEXyLCsNT9YylBK4QvnJwOnrudhb/h4X/HqZoMyiu+++45N2xDGiTC
NtG1L4xoG30C7pV7f4G0tfjqlYUx67uQ/XSh8CvAQX6QflBrliT77Z5Y/9OKnlM+fJ5U0XfneqESR9HTk8OkSSD6dysk6I+Pzh5ePYbQpLpgVba1QppgoWkPwlrpyVFVdCsGELs3ak+dQ3is9F4qIi3ZVVzRksqswZoXWDkNi3V/uNeKTMz9ORlBOW1WS0xMVptgLaONIShK0lSzPaKPIOi/rpnVWRAO6nAPOqpQ8g61JxnaxkEHHrqMfBvw0EkuxUWuKUWU6v8/5fMplz8kl9VgwH/NjJKOL4ydzNi6PS7LI/PfCryhV8Uya0aXSlKCasjHOqpWLwmM+5jHA/Pcy7phHL/ns2nn5fD6lglm+fvGc5+LhcmE7y3+e+oFcFjvzxzHGC3XY+XfpmeAaE1rpxV75+mZHu64IfqQpZmIKSb61ZW6fCtntPHOWbnSGgGPM+HEqfItaZuDGMBVS3llfTdngIrhLuKvkcRiDIi8Q63zWRdZb8l56MdQKwaNDWFIbcqn0cxatdsqeTKB2a26v1+gcqGuHNYpplA7OWFt4GkUGqg3OaByWlTacTh3eiAFS3w8oZajrijAG8eDwnr4X59BxFI7CgcSqqXj77j2fffYZ/TCy3mxpmpakRDPu+kCIivWq5XTqGPqeEDxv37/HvHnJNEVcbTC2ZppOtLUTrkgGYytSSExTJKWMDxOHwx5TtTQqE70XFnfQtJXl5z/7nA+PJ75+e8e+8/go70MIAlXO1bNCyWJRRilzONs4epybmFzFVAKTJu95eHxE31xTO1v4DA5XyTxXa8N2txMCa0iEKeAK1ycWA6zkI1pp1m0rZOS25er6ipcvX3F7e4vSUqA7Z9lut9Ql4K6qKuq6ph96qrphc7Xl5euXfP/d96w2a1I3MqXIx/0j33z/HW+ub9m0a6pyLxmtUdbClIlZXchxWVBHY4X5HkJ4yqmwVkinl4oMLYuaypBjwicPIRLK+DJpxViQFO0spnai+qFB50wchhI6BbfXV1jn2F1f4Zzj+vpaiLPacPfhA4fHPSZDDhFTiVV7U9XokkMRyrkOk19MoPbdiY+HR7559x2//vYb7o6PRKvBGdlYn4yDnx5PPifL4bLO/LiSrzQowUuhNnlG77l/2GOJrFZrXF0TE2hrcbUjZoWrV5ANrm5ZbXecdENQBpQRYyj0k/X7Tz30gkj/8Hk+fcHlU8vY4fzvuVxRSjhi85j63+cxK8Aq52jqmnW06OlMloxVYHROEpqjyKirylHjLh9knvuhlF4aOmNkrNdkSXxNyaGVRZOebMq5IP0pPcvv0fK/0rMBlxPuiNGLCaIxQmB31qKqhPESTKjm0WMZz+m5QS6n2AAqJ/rjkfuPdzw8PnI4HcVCIQWiH3EqESdByWbZ+MwDyWXkIwXHeU9eTM/+yPETi41LL4zLDv/pxzPxD6RaU0WrPDNllTYCzxjpMGbfC2Ahb86FxGVBMStUzh+zcEfOv1s/Mc56fnxKq/2pz1/OSRc47WIBLD8kce+lG56ft1UZrTPWatablu12w2634eWrqxLIlgmTuI/mnIh+KsgPQGYcB6ZhFElwFqTGWSuEvn7EjxNGK2onLGlNRhU0J8SC6uQLJrcq+mlykVeKH0Jd15iS6XCGbcXXP1I8K7zHpEiZusu1rKXCthqGcUDbxG5Tc7WpqQysGidjtChpiFVdiwKkqUWNUhbTKU4cTx3DMC7nX6xyMz7BNExcX++4fzzy+LjHFLO4cRR1ToyRGDybXc/xNGDrFceu49T1aCXGaDmIbNb7ia6TxN23b9+xXbfEcUQlS13VKOMkjlkr6nqNYZ53d2SER+G9Jykrvz8MkqOQLUpV3G5a/vKrL+iGkcPxI37yoG2RpeoFipR7J5Xxr1wzfvIMwyhseFfhlMYh6Njp1GGNlrTbIDyYu/uPDH1H3a7Y749MgxiamdkBMwppTCnFMA60TcNqvaapG1abFdvdjhcvXrHZbBh64bR88dkbXr9+g1KK7/ieMI3FzMry3//X/5r/+3/1X9FuWm5eXDOcTkzegzacwsjXH94ScuIXdYVJerH9TiiiCriZjKeEfAyy+dnC7whxVngUWDrPLpqqyFYldKouG4BB/BKmGAhFFSIW23lZMx77Exlotmtqa8ndiRQCu90Vv/j5zxe/nHEYqa1byNU2K0I/kJCO1ShJtXXGglZSPIaAiglXzMWmGHg47vnt22/5d7/7Nb999x1dDqTWMcUgrP1nDdnl2jN/rAoKrC48EZ6vS88PbYw4FjeO2sm4LkT48PGRTeO4uXnJh7dvaaqKkCnxEpCUxZgGV7cYVROUISlDVsLdKCktfHJ2/wmkWV106H/0yBffP1cbZ5D8XOTMiMRPlTf+kUMaHl34EQ118CQCqaT9BqslYiEFmlqKbq01Lp+RjZzFwVXkpvNLKGZ6iFdMiNANvcQ9pMjm8eH88zGSk2GGDbQ6IyXWWpzStNZRtStsVTF5x2bdQk60rSjZqqoi5MRkNWSJdkhFzjoPu+RhNSrLCC74ifs7MZh7++499w/3BO9FCacSjVWMg6xt81u8XJv5jI4rJcrFYRjo+gvzkR85fnLq6yyLeV5gXN5A+fwqn0hjYgmLmcOLIC+citkM63K08RxZkMLDzGdgyS+Bp+OO58/pop78QUFxWWws33Xx8Vy8CMv4KTGWnIrKUTJGUhQui1bQtC3WWa52W25ubxbliLMOP4k9dE6JoeuoSnc0V4yCKASy1jLTTJngPZvdTjbZYjMdQsQaTds0+NOpdLO+dJZS9F2+prycl+KuqC6XE8mmCEH8QIw2JD1Xtko02TEi635RaKRAzhObtuHzN9e0FTQOiB4fIqCp24a2XRVJp8ST55IX4aMUDNoY/DQtUuIQA7VrUNqibc3N9RWP+0dOxwOgJKERsY5etS13jwe6bgDreNwfGIvniTGabd2wPxxoqpqUxYOjO504nnqshn7ynEZP0zj6cWK9qqmaFVZr6aCDjFJyVqzXK7IST4fGVfR9R5gGsskYHC+u1nz+6oYPd3v60Zc5sZA/jQJyPBOsnyzLgaoXp9m6mnBKY2eOUkzEuEerjMqJw0FJDk6WWX53PFJriYSuq5qcoTsdhccSE3XTsF6vqJ3FWblWtpsNdW1JKRRo1hWX3Kq43nbEGGjahpsXL/j2+28ZpgEUhBxp1ivC5Om6saRQZ5SzNO2KK9fitKifcpb8kMrYMqKSYmO+x+cFTJUZvC1crhkZNXYpOUBnWlfjnMEqI+ZYsThBlPWhchWurmQRJLG62vLq5UuqYgB33B95cXPLzcuXYgo3jDhjGbuesT/RVg2+G1CxmI1l6UbnAijFRAqBOAUMkjPjU+ThcODbuw/85vtv+f7xji57vFUkAx7pKJ+vKTNq+wN0mKeuxH/sMNZy9/hA21asGsvx1LJZ1cSs+P7dR17dXIkhlNFMKWKSRiVNvdqiVmuydqBsKTTOnLmlMfyE9PUScVk+V/785IDjB69l7rXLn0+QDZjhDlW+kP89IxvzoUsMhFahvGJRVRZwAZRiu1mjtfgZJQ
1AAJZWUu8kHiG6xzOVE0UtcVw0h23AQIpKgIXnPwCbNc0DQLjKs59B3buxtWq5UoUKwTBVBMIkUu2D0pc9SL9PKXj3eLhvedU5k/P+UbvdMaygW2VopJpYxuT4zTyBQCLkVU0hkpEPK2qxqqpkW4MeI5FIPcLyqHq4XCwQJIhhgyGqHEEyUqiOKBgKqE06FMjVGaGkN1stxWKZBI1LZGkagWLd2YRNmhcrEUExYNyRMnQcd0ShB6VPQsm4qojnQFlQLj0JOiBzzb7T3LtkFrxeiHnLxdjN9CNiGTjVkiZZmybIhm48cEwzgwHLaoOLB0YBqHdRbrjsX/1zm+mfQVsShVSV6YDzn0ZRL777oWLTlRZRMSwzR5bm9vwdSslmcoZ9DGooylbgzaGJpGrGwPXcfrVy/Z390AiLIlEw9DVKAEBbAKFtmoy+e2y7IW9u3Qd5ASdV3LAhEClXX5RhQm7+QnnNW0TS3kmSTua7UzHEhYDUZJK8hqmUyMEli8qYXhu6hrKqupnfhOWGNw1mQHNkPlpO1QOWE1K2Rwz1bkeVBA7q3lz03BjPPg0YjLXMo7EussMZp5kk/pmB/zDmSqxatjjDE/pyFEGXQRKeJ8EItslPgQTL0ERC0WLZAYx4Fp6nBmIeFA48D+cOD+9o6b23uGKXH56Anri8ck7eiGEZdJBHW1ZrlYMAyevjvgQ6QePW4FuCVRyQQUs2fL6eQeI+IsmvvWgnYIUhOHNCNXVVXRdR1N03B+Lu2V6+vrOaW38DlAdvPDMORzJlDyO3yPzAuZ/ISfJHjJe4+Knh5pf2hrIFlS9LKjDNmtMZP+dO6vymuNWG1FTaDNLKPzXhRata1ZrJd8/Om3SRi0rdjd3HN7d8ejJ99iuVoSkqB52/2BP/3RT3n55i2r9Zqnz57SLpbs9wf22430YnPxIOjGcYFXeUeLltcpt4eaN5Xvt0B+FZrxPpmwQPqnfJCyALyPjJz20IsJmzHFwfDd5z5tj5ySUVOCmL0wizlc4WMUQiWUaxLEsCnBNI2COGW4HJ3PRylgQrabnyb82EsbLSfHGn3qc0MmS5KdRwPH1syxEDs9dzqPQcO7SJD34i4pRYvFqEgyIMGJgUrDum1wT5/w+NEjNtsd99s9m+2BQz/KPeo945AjG5QkO8foIQpfzIdA3+1ZLJaYumK/2+eFEVKK2MoxTqJoM8bgQ+YWJbnPrZE5+exszWHwDONE1dR0wyQozjTSD4MUbkbT9wPb7YbKVfO8HKs6twHeWUTmVvCf5Tgt6r5qkSvcn2PLRI7Eu0Togn6Ue4t8L4QQmMaRoe9Fap8CycA0DichnOSxlGbUJgFk1YaxBuOyEAAhQqeUcCd250Ylams5Wy2FAN8upFB7j/dzKnoo4+twODAOHf2hp+9kfo1hYup7Doct+InayKby8vxMzCaNeJa+O8+WDJdu/n45h6fIXSlICtFcvu5/Jdr0q45vbFcucLFIqaZRfAN8kCotZYarzrLOkiMQgvS3VPZ3B07yJAymbnj05AOW7YLoI3/6T/4RX/zspxjrSGGAFHHGiJtlxroSzPKmAgtPk0TvivlIhZ9G4ujRwssRKLy2WCsTsxFNK20tOy1CwBmBgiujsVqUHrZyqOBxRlFXjqauqIylsoq6siI9NUbaO85QW4dzwtuw1gpMRyIL1uddWxksAnNDzK2VEEx2AZQBYbQmZOQDJQhNyL1ohXA8Yj4fZQeuMoO+qTO87MVdlLzAl7aATwmCJLk+ffqUP/c73+Xxk4ek2OF9kpA6RjE/ioG2csRFy9B33N+/4cd/+pYHT+44v3qMTpr1qmG3T3g/ye6qNgSgGwe6uzucB7sImGoJxmV1x9GzQJwiE8HL7rWqNN7nQRdiLpSOOx9rLU0jF/j169fs93uWyyUpSV5OUZ0opWYF0zAM84Aq9ud12zCMI9M44aeJIfM4UphIXmGU5CNEpaXwGkaS92L1nmQnK1DoEY6e+S2Ir4muLFVlWSxblqsli9USTM3Qj6gYWJ9fsesCL1+/4pFx3Nxuef7yFdtu4Oz8ij/3u3+OTz/9Nm3bzmod7z2T9zSm2CrbGTXT82JP3snJTiUmNSeTlrbHacHwPqpQjvf7+6eoxenvvL/glsLu9HFttt3npBA5LWre5xsUpQ8I38fkwp4shUapk3tIWnKTF+6S0YaqsmgSQ9/L3JNJpcMw4Mce33UYokTYY2Wc5CJY6/z8MyLk54l3VrScvLd32kfkRNgk5D45/8VsKkq8fTAQPJrI0hgWdUNfWca+53yx4tHVFZOP3G8PXN/csTt0UoDc34ubrQZTWWmvpiDE0pDRm+QxqqJtKqZxIgTPg8dXPHr8mOubG+7vdzSLluVygQ+eqq5Yr1aAIvqItRXVIiPLUWHrfM4AlCw8/TCy3e54+DhirKFtWzb9HT4Tv3MDW84L6Z2E1v9vj19VcBRuSyleodzLaf68FAzlMWIMhGnETwPjKJQAqxHkIhyjI0DQvhgCKopFQcgLTc8EawABAABJREFUt9aatmk4OzvDaOEeBp8YhpYzU/5esV4tUbUjrNfUdYWzYoJ52O9n2wWRP48z+gjM39vvdmxud+y6UTgV3jMNnShEjWJze4eOgWG/p66yijNvjgqx2WdRRXkOIXkzI53ks3UcV8JNKSFuNts4nLZlf93xzX02xmnurpMi4+iYvGcYR7phEM29EmfF4KXAWCzXxKBmiGZOIs2kG61rdGu5cA2/83t/wNAdePP6NdpPDCFgiJmnkSAJWWccR0hp5izM3hJkBCOrYHRdCXyVe12Ns6gqkzbzDr9dLaSqCxMLKyRO4VxoamOorSF5TaWk2KisybbkhtpJYWGNmcllNiMhspFKKE68BxR58irEt6Ohjuzk9Hv9YE2yYmjlJy/IRIoQAzFPeDpPgCVWugwmIdIW+VferZk677AlTKfrexKRDz58yu/9/vd49sETFLDb7/B+B9FjNEzjSIoBZy3rZY01V1ycnXFzv+P5y2v6w4F2eSbE0CikStmNKmLMC5OBYewZk8ZMHkwlHJb3ii8hARb+gaZkCiilGP30TuU9juOcb1JydZRS7Ha7efCX3XRRK5X/30E+nMOniB8HxknMw8I05qyHhDMQvRAK+3FiGid0EKv3EMVO2lhHPwyAFB6yy9JA5mW0C84v1pxdnNMsGoyz7PYHDmPg9v6Op08/5MmHH/Inf/LPmb58wb4T19y/+tf+Bo+ffYCPalZOWWs5OzvjzSvL0I8szuT7lc19aGc5AtW5QJPaGhXfRTF+1c68nKvTAqMUAae//1WIx+nHUaZ5fD5pPYlM+JRECu/6nhyfR8+LlAJSiPiU0MlkY6ujGq68zgLl+GliGgain2bfATIPAS1IY7tYUFxCSwyAwkhClhJUN4ZjWJfPBk2n7/N0UTsSV4VDgS5Jq+R5J83oivfCy/JW4ceeurKcL1uGw57xsMFlv4zL1ZKrswvQmv2h59Xr19xt7jN5UTFm19xEoqkb6rqe1Vxt24rXxtDRtA2/97u/Q1U3vHn7liEEtHX4CFXdcHF
1hVKa/nBgv9nKGE5wc7dhzLvi7WbD7v6OvosS8GUtOheATdNwn4QgKtc9n+uZ+1Gu4n9xx1ejc+odv57TazNfiVTIu5FpGOgOew77Hf1hh9gmGHEOyJu/+T4PcVYTCodNfFuUUtRVzWqxQKusgAoRZzX1YaCgLZoS3iaf+2lgt7nn5UtL1x3mtNk+26e/K4MWObUzmnXbEConr6+tGRctfuhRiO3AcDhI2GmMmS/0bjFcslCqjBAWJ+1TEnYJbVNKHeXCXzFn/KbjmxUb0ySyppTts1Ngu428evlKFhcSfZ9j2RctaIf3CWsrPJLDYZy0KebAPy3GJyTNGCfOLh7xO7/7fV6/eMmr559BikxDdtfzAyFq4KhQMdkhU3T70jYxRlNXhrqy1NZiBzUbbNU2p7UqAyRCgNYZgpKiwFUOayy1s+iUqK2hdRZ0otJGOBpW/PabusqtEkE2dI55J4q/iBZ7s9lBTzwG9DHK9z3YWr8nfQr4zASHFLWgEBmyV4nsUJnAGFKQsLdCShOyXCYYZfvzGI6wYcgsY60Tjx895C/84ff5+NMPaZpKILIYSD6gVCROA0QPMYg0OSWcFu34xbJl/b1v8+rNW169eUFUwhdRxqCMIyTN5BMeg48KosaYSvrCmc3tT3awhXilOCooYjwuIuVmL7vl3W43O84ul0uUUmw2m5mn0XUdwDuGXuUoC2AIgb7r8DEKupFjomMQ74VAJChIkydOgRCLwqHIdUV1YIwl+ICxDmUcSkmqa9MsWa8vOTs74+LqgtXZSrgFCtp1z+2Pf8q+H9CuQtma9fkFm+2WP/j9P88/+8GP6LNKS9uazz77jBcvXrBarXj65Aln6zVdf+DB5RqTNNo6gp+w1mVEQ2Bz6cEKwlbmhlMORvn6q1opp4jHaRFQFtj3EY/3H+c0r6h4c5wiFqcoSpGwl58dH7t4ShSZrELFI7oVSPPfhUx8SzArgrTWuLomFPOt4Bn9xDhNkkYdR+FgZL+gkjPhp1HoolGY/dMku0BO3u9py+f0XBwLOZ3p3Ep2wSGISiAFopYFJyhLDAY/9Yz7yHp1xuqjZ9zc3nO32aFNhXVCeNbG8vjinAcXZ3RDRz8OsuGIgcOhQwz6FtR1NUuzm7rBVU5IfocD54ua86sLLs+X9D6yvriiWZ2TtOXs8pLl+hyTItv7e7bbPf00cX1zx939Fu8n7u9uubl+w2G3YbVc8OTxY5bLpThz5kiJosBRHNt50k4uRKKvsej8huO0iDj9ukg6U8ob31BiEsKseJPrI3PPMIyM40D0I04l2sqxWLRUVotBY4pM2U4BBEUPQ8LHwDh5+q6n63u2d/cc9ntRVo5TJnOqnBh+LHL8NGGceEAtmkbMJhspDu9ubvG5c9D3/YzEHjdGlhgS3kOIipRC3vyDRVLBxedJi8+T1iijUdZJplP2GDkt8osfVYnu+Kox+n579esWGeX4ZhHz3hOGgaAkAdWnEjJ1i7bi6BhiYpgCJhcZISKs/ZBbLFpcICHO9s4paayrUIgPe13X/P7NDX3XcT0O2ORJ4yC78ZirMVdhjaKkFislElVbWdqmkr7lNDCFiTD18+9UVmMUuMrlXWAtA1WL3wQhiERVa0iR2ihqp1HGUmlNZURhUjknRNkcCWwzwmK1FAlGRYy2AmUqcgqoyR4C707oKaW54s8y/tmIJYpOSQoqxIkUEioZUt5JKa3w5ghFa6XwAXTSOGXBe1LShJwZ4oMnqoR2hqcPH/Pn/uB3+fQ7H7NcNYzjnhgGnBGeSMm9scaRgsCJRDFwMouKbgjcb3as2goenPP67T37bhI1SrtEuZqqrtFYtLJiUqUUYRqITOBqsf+eJwspyKSrFTIqc7y5i86jkBaLKqFIWYvcuWmaGc0oA2X2g1BqDmAru+i+7wkpikNo5mGU51HBY3VChUk8S1Sxq2POo0hK8D5tHU4ZbNXgXMXZ2QXn51cs15csFiuWZ0uqupLGtVYszi/50c8/R9uadnVO1/V89Mkn/MN/8A+4ubnh0aPHHPZ7Pvvsc7b7jovLSz788EPu7u54+fIl5+sFw2EQ8lhVvEaMoGcq6/9TtqyPkitaELXT47SgeB+tOJ1sTpUoX1WElMd6/3FnguV77prl/i/X8pSMeuQlyWNXrs7qguwiqoTwNk2TSFxzERJizCTEEpSX0TKtqJwjBs84QBpHMSxKoJXFOdm8RC8chehH4ampEmYohZp42rz7HsvrP52IS6GBlmtC3gRM3jONIyp6rFYkPCY6mkrje88wdVyuFjx+8pSL9Zqff/YFm10HQaGTxo8DXRRrdqMTNknKcFs7Fk4f3V7jxKJtZn5Ca6CqLKv6jDh2bN4MtMsVrbHoMLFqa7A1dd3w4OFDVm2DSpH9vmPwsnm5vbun7wcOuy13d295/eIF09BRVRWXl5e0Tcs0TjO0Prcp0nGeyz+Yx/WvOk7vo1Mi6PuL3em9V1oBc8v4BCUuvINynUpbZRwD/WHHNPQ4DetFzbKtswlbJE6Rzeae66j4dn6et2/fMvXiW9IPk5Aox5Hd7sA4jJCE9K8z96dylmVngQ1KKc7WS6hKVoqoP7q9Z38vxW0pnIWjJDlhJS+rchWqNigspJxCrg3GZOFBbqEbLa43Kns7KWtF0VfafupIGD2t/1KMuc2pj2qrMl7zZVPqhPeUvl7V+I2LjWkYsEYTVYZTXeTQDewPnezItZIiIySMddRNIwuIVdm5MsvfjELbsufX+MmjMxbgI3zw0Sd8+fwz3rx+CdrMhEKRGc0IaW7DiMFPZTXOyoLvx4EUA9NU8kBkAW9cJmaGCWMr6trhR4lRd0qqxrpycgGVkK1qZ2XXiFjLVlbT5DZLifWV6lUmJWekUDHZUTXGIBNehtePk3OBfCMl6lcKjjQzqgPClrZaYepqJgwpozF1NV9s1dRSRMTs84D0ckNKjKN4SxirMUGLqscamsrx6Xc+4fd//3cxFXjf431P9AMqTRilcFaUNrHAxykXiZlw19SGdLaAfY/3msuLM3za8fZuyzBF6pXG1Q3aOWrXgqoYQxJmvDIEZbJxjRximpTmaaj4CcgOhdk1shzvL1LFvrwQPIvMFY7OlKfGYX3fZ8hSCLCiFDCzRbr3nuQnxhiwOmEQZU+h76aTAighFuxox3Kx5PzyisvLB6xW57h6ha1qtBGESiPpvj4kvve9P8dPfv63+cnPfsYnn3xK8p6HDx7y6tVrHj/7EKUSZ2dnfPCtj3E5B8YYw+3NDU3TsLvxJGRC6rp9niTl+ngfsgJK4MRCDS2F16lc8/3dS5ncT89b+dtTaPqrjMFOd0WnBUrZVZ7Cwl9FtDx9DJCxk2KaWwa2/E5+P2KSm+a2oqks1tg5LVmQsGEmAYbgxSCuacS4bn/Ax8QUhJuklHgTqCTx34IMBeFchCNX7PScffXuOsn8BUf2f2l/ao0xCpOEq6WBGEaatuVsXbNaCll76A+E6RX9MIqy2id8SlRNg59GCLnYQpxVxdsIQc+ymZgC4igOyClJyKQzGu0HfJg4+Am04fzh0xnBjn
XNYtGyWK7pfWSYPMv1GZP3HPZ7nvZPWa+W7LYbwiTI0DAODDnk8LTdRm6pkNGOFJmVee+2fn+5gHi/BfI+OfT0e8KxOXKVUuYnFa7WOI7UtZs3GClFum7k5uaG3WYjpo3BC+FfC5JcrZaMfcfuZLm8u7lhGEQW2vcj/Thkn5+Q7ftVJu4HfJBkcHNSW9XOEAq3iuNa0bRNtkUw80dxUi5fz8W+aKPmtaOsJ/pkbZGxA0mJ0vCXinzSbDL5zpGkVTmj8GWzUR435VGp1DuW9L/u+GYE0RhI04AfZZEtu0+fwG1rphip6pq2EUlsXbdUdSu2y0hcbV51S4EkJ4d4UjFpXN2yurjkk+98l/vbt7z64md0MWBSYNwPpAgmCTHHkMkqRnqiYRqJRFGUaI1zStJZM5HYmQxpKXDZv8JajUOLisSKfFbawPnnKeC0hI9J+8SxaBspAFRJu81AaZKqMsGc+KfE4lFQDUQSF08GGFGa6Sr/TIyjxA1R55ZJyJOp1UaQAKWJWiqumBJEnxUpAvHq3I4YfSCFiKsrtNZ512HAGb77ve/y+3/+96lqwxh6xqljnDoIExpRYgRyqyrDbIpEUIkYPCkbVykSzmYyagqMqzWbzYEvvnxJsx44f2TRlcbWDls5CdRKGh8jfhjx6pQ0m70WjH1nwil8n3gyoZeJqvQai6NoQSuK50ZpocyPEyN1Xcv5yIZIzi2Fh2EsMUV8lroZrQg+omIg5hAvYkIpUYrEkDC2IilNYmSxWlE3Sy4urji/uGK1PqNulhjborQhqUwMzAtjiIkHDx7y0Ucf8aMf/5TLi6v/D3V/Eizblub5Qb/V7M7dT3v7+/oXfZeZVYmkrKJUIEqpUmGYGDACTIJRzRhgJqM0wDBDGMaAARMYyAzDMEZigjRAhQkko1SgLKoyq8moiMjIVGbE69+7991zT+fNblbD4Ftr+3Y/575374uIyoz97Lzrx4/79u17r73W9/2///f/c3x4wDvvvssPf/gjmqbmzXdeQxcVRdUwOMdnn33GD3/4Q4zWPLx3B6UU67YdNVJCGoMBpPMJQO8u5NOFID+eCnVNa7q3tbflclY+p/laZQuB7D2klBrl36eoRi5pTXOiPKHlrG6/pJPZ8iPslIMdayTQI3mgFFYklJWi79rRgM9anZym5f6qqopYCH9rMVsQfGqL1RBcT99u6Ddr/JDKjxPuk1K7VvK3EWy3iJrFRwiDS5w2lUjnAyF65lUp8tcEZk3B3dMFxwcNs9rgg+b0eM5qNefzZxf0vZOyTHT07YqAl1ZHJdogIQb8MCTyYgFGwksfRPnXGsl4Sw1FVATnMWhciKx8GDkuhdFCHOeEer6grGvKxtA7Ke02swbf93jX8/kTzdX5OX3b4p0fYf/Mu4o7VzldQrYEzek21WnZ4d/s/ZuTi4xeToMN6SISsas+mT4Ow8Dnn39OCJ6jo0O01qnbbmAYAsvlEu97ZnVJRDgoTVWnMpDMd5eXFxArAD755BPWlaBU/eCTXDwoTOrElHmssBaTkK3DPpCRjQf370EhWh9C6taJ4zflKe2Op/0AS0VZLyXYyOdG1q3pAqumj/dQx1fZ9q/Vq26v1voaITovpk8+EFWg9x4dI8vlChcjc6Wo6kZYzLX0YVd1g9Zi6atSlIuSaIsY0UGsfGMEpY30y2tBN/p2jXcdH27WqOBECwIzQqghSJ+8GwaUjtKyqgUytakEYsicDSiMCHlpJDuqrQJtCUaLiZqxzMoCozMELYFLVRXM6hqjSJoaStwl5TKMfDOYROzBE4hi255gvXTZIN1skLlTSSAmAbSGBFOp8XQBuXVWvrsK2z2qdLNlSXeRMxaPltmswRhZiI1WRC1CUe9+412OT4/wvscARkspKsTUhdL3eAd9ElKTdrjkMBnl+E3UlFUhDpvOYwzUpaawJUPvOfvoM1aDYXZ4B0yLKWvKsqasqlQ6S2ckZvVFmdAlZsq18HxeBRLPi1ue5HO2nFGf7B2Qf8+mbXnhy2WWrut24O8YQvouknl1XYupKnIYGKNk1kYX2KIkuE2iHWm8iwzOc3J8yuLwmIODI+pmJguNDyglYl8BIZzqaAgxUNUNXT/wG7/xG7z38/e5OD/nYD5jGHqOj0/49NNPefj6Wzjv+fmf/Anvf/AB6/WaBw8e8IMffB8dPNZKC3DTNKljI2KsEX5NmvRUzP4iSaNlglTkAGS6TUsjt3Va3CwXbEssU4XK7M48RUHyFhLpLu/vNs7DNEAim5EhC79SqfwY2dallbRxt31L1w04l1pZdW4dL+R+TZOzMhoVVep+MqDEYdY5If/ZskDhiWFA+lMTahF3uRohQc9T2XyT2kd9rh7keS8JcxqtCcNAP3TEIVIezjg6PGaxqCkrhcKhUdRNycFBw8XFFSEMaFOglSB+Rnk0DqLCxIgKERMH8UZyAd9tFwhTgdWV2Eu4IBw1o7BKiJBDGBiuz1kTqcpEFGwadNXQlBpbFHjkOhWlGIbNF4dcXl6IQm7wYnAXIk0jhOgYI13bCaoHMhcqs1OGmi5i0/bS6Ta1osjnPRPEs3Jwvpel06jHOXn+6vqKrtuw2Wxo2w3n58/HoFY4G4lE7j11Vcg8YwRxkAQ+l/mmpUVRhK6qChD+ldY6iV2Jk7ZCUZUGayQomOkW+BQF3LtzQrBSEp+u4WqyJuwUmhKMn6oX43rAiKumlxHHBGNi8IuO23VETffzEvFGLoMpuCVsfPntFXU2ImpiVBSVtMAGpWj7nmgNRd2km8tgbYktxB7b2FImQrW98STrl8xfo7bqetpiyprF4QlvvvM1ri7POXv2hPWVo8ISXTKXCck4KUp7rY5QViK0JeJaorBpwlZBtCxEahwfRnEfbRQeCUJKo6lsqt1qYU3YJF9ep/KK1qCCRysr5lBkB9a0pT59MUbzqGjwbkBpmybdXflomVC3bHYxcwtElxTwUmAWURIEJZVRqSOnLJnkizLqaWxrkznD7zqBN6um5tHjx9y5c0fqi1ok4KWlT9owCVv1RNf1+EKQBqsTHJ+0PUg3WVEUGDtgtMDQVlvmswVPnj9h+dGn3HlgmR0cY5yiax1l2VPPZsSqIkwWtpjguWz3LRNM7uwQ8t/0vOVJwzm3I3l+dXWF956DgwOpJTfNSBzNBLbLy0s534mR7ROb3ChFXVWsNpsRtrTJxlnq7BETIbo1g3XEJE4GivliwXy+oKrqhJKACuIHo7VOOg9yL5m8wBI5Pljw/e9/j9///X/M8eEBs1nDm2++wR/80z/k5z//Oc/Ozvn06TPefOst/spf+Ss0TSO6MpslzWzGer3h9FRurtGKPUGg+e5VSV03+G0QsS/KtV/O2Cdw5oDiNpg7v3/a9ZMDuimj/baAAriBEOzvW7LkNFnmWy3GhEgoCIF+ENKnG3zyNZHSWlEWUgYht55mDklSzLWK0ohTad/KPacLS7dZoWyBCp6ohNTpfCCGrdPtFPm5cd60SqW/lL1GaQsfnCfgUcHTB1E/vnNyysMH9zg6MFS1QWmPmJbJIliUGh9EJ0dI+gpj0
zyqBLWNWgLLaFIQmMiD2mhKYyiMwaduNh0jJVISt0phlGfVXrNygyBadQN1gy5qQagbS6GkcyPD/0VZUlY1s9kMnb1YlLQJWytaOV3fU/aDdOt50VdByVXcRy1uK5nksZEDjIxeDsPAer0ePY62ImxJizjsdqF571mt1rTtZgxK5PoXY6miTP+OGjDpetqipHYG1DVEuHvnDm6RrN8xuOCTRZGd3C9glBuTTzOpN+Q5QL7ztoS0j+jmc7GPdgCj8qdwglICmvYaJuFI3sTmRI0BR9wNZ1647fCTvvTVL95erYyCnKCY6vcxijqcTKYe1Tu6rscNLp347NqYoNhMblTsfNFMaFFKi3NoiuLKesbpHfFQ+fjD9/l06HF0DGlASe+71E6tRkoeuc5l5OKWhaFw24lgVpU0VS2OnMFR2mTBmxwehQSaSgZGY2xm9MZEMLMTQ6Z0WUNMN7waJz1lFeiATqUGmfic8DvyJVRbJVE7hasj+DgQlJwXUjklpvKI9EeAC7mlMVJomURCkK4UgwiYKaQtNjrxVbFlyd0H9/nmt78lyoNug3c9bXuF86I+p1INEWOTgZRO3SV6DBYFmg10wyDIg7I0dUOkpB8GmnrDwcEh9+/Bzz95xs9+9j73Hvbce/CYsrT0vQNaCRTSTa1AAplUbIqQwm8z3vgh8zn2JnelFO2mRSnhTcQQscmeu+97VqsV6/Wa2WzG6ckp6/Way8tLYhSH4PlsLoiS1mhjKcqS5XpDVcqEWRVWAm0/iOaG04RujXMR50E5TzObsTg4pG4a8baJjKlKCEkqWUm+FLxH24SepZzhu9/+Nj/58R/x3nvv8f3vfQ/vA0dHh/zsz/6Mb3zrO2AK5vPZGFAtr6+5ujijKSwX1+cCoYaIMiq1ZsaUYaXzpXJLrpzXKZIgJZ0kYa9y9rNFI6YT/zRImaJMuf24bVuaphmDwLIsx2Aj7yujGlnNdB8iztu0c8Ag6qAqw+8xS3OHsQ3ce8+QSjZNVaGtnkzSMqZMcmFWCSXME3TvHGHo8YMjOAe+l/NJEhpDUAmT7vwpV2A/ABs5C8nVWe5rCfSEwBrQBEprWFQVb7x2nzdef43FoqCqhbAnCYilHDwHh3OOjg55fnHN4CQxMEmsULjGapc8PgZT27KENXKPWaMotKEwUsIutMZWljYg3RVhYHV5hmoW+EJK4LYoRb05meRZW2C0IBh37txjPqtxm1ZakgdHoS19PzBfLBJnI/t7yNhXSXNoGlRMA4scTGTV2fw4I2V5bOXxlgnfeV0orEVrWUekDLyLyo3OqUXBfL6gKErKZDNhU6CR7d0z+dtsPHANQF3VDJWgwN4nXhPbMTaul0rWilwi3llLJ2P+Np7S9oVpTKU1U8Z+giVUHMdynh+zEuiNyOCWAH6f//Kr3F4p2NDIwjYEB9EgPBiDCaA8KCf1bIEJk7iXypBNIFHEBZKRGgoqJvIQGRDKKh4KYwqCMpyc3OP119/m7OnnGAzEFaEfKLSRaC1qaltgVRTmuIooazBGygI6eQZoBbW1NFY+UCvRx1AKvInoAKXRqdQiGWK2h9eKVJpJniYhCjQVwhj55ZZUoy06Jllg+SZUNkGwQRG9wIka8QZRSst5CKlEkuR8tYWgJDjTQUEQQS5CRAD9QEQyl8IUhMHhs0Jj8h3xfY9re0RV2VDMZxzcPaZclHShI4Qeo0WeHTegSEZ3SqFKgQONLslcG6fTZGk8vYNOCWHUKkWhLHVhqeqAtdA0ltOTQ97/9Dmff37Oph/oXOD47j1miwXOe3wbcN6L/K2xYvymDMbkfm6Q1i45N5jJDatkHIWUPYbkoQPiOIk2XF9ds7y6JobIfDFnMRNX32eff85mtaauKpqqpqlrQIKBWdNIjTWJgxFzvR5CTKGiqVDlIWqwRAYotHQnzY8oynK05SYhMyGAU6BDyEU0fOepaoFtjVIs5g1/6Td/wN//L/4/XF5ecXRyyttvv83Hn/xDLs7PuHd6wvVyyfvvvUfbdeJ/M/TcO1oQgyF6ITjjk9KvGgt2koEZS0ykRCERiPw0I6KQ56+YfGj01kwupvLlHvIAItDns1DQMAjLPS2oGTWy1m4X6EyclCfk+NTkc9QYjo8aB5BUdFVMPIWsiJpF1YRHVlgjBn96W/aThSxsM0iAmIT0kotsUQh5N+eDWkP0MGwcUWu0KlHaEZxK5yIRHsfDzmhJSkFGpDOitLR5ez/gvegUWa0orcESaGaW4+MZVRnRKiQJdpv2JWT6srLM5w3NrMYvO0LI7fGy6FilE29I8GIJoiT5Eq5VwtRDv0WJFHhAq3S+lGJWGmoim37J5uwzPBoTPXUpbZGmrKnqBmMLjIaD41OKssINJwxdJyTexK0ZBifdYWWJ0woD0s0lTNiR/J45PjlY7RORsx8GhqFPiZoSfkTf0+dgw3v6rmNIJRDpztHJEVuQzqIsRNBNm9TWaVPnoHQQGiO+OsaY1NGx5SPGdG51Cjb0dFFOELcirTkTkCCvXxDSGpInKzXOW9oYgtF7dYnIxBpzm4ClYHiH/5JepNGJj6DGT92p1jN5eaocRLUNNAS7f/niyDiOvsL2SsGGuFlAVAGPCFApBYVXmLj9yRHcVtgIQPqetRJyoJy0VLNCiatgTCcClYIVQ1FUHB4d88Yb7/LRBx+yuX6OjgOboRcZ6CALjo4GreVmlU4XKY/kdiCQE1wZhUx7HqsFARGfAlAhUhmNVcLtGDMFBYWRNlbRpt+2Axm1hdvyALXGjHW1jOpoLV0gWmm0MmM3gkr/ZU5PJIkgRYhKSdCkJFP1SWCKHFCpQK4966jQAVI6jUFQn8FDkYI7bw2zwwUH906wiwqlBdKNrk81aUF3QgSvtEg+a0vbRbQp0VWBLpMpEBHTD5RVj9+0hG7AtQO964kqYCuNLQ1K95RWUVrYbDZ88tlnLAfHyb27zBcLCiOCNNF7opVW16gDKqasJ6RxEVNcb6SunrNVrTXRTbLFENE+ud6qABP7b+8qllfXPO/PePb0c6w2oj9gLOvVms2moyzXQrIMAZcEwyKkoDQRUrWIvqligW0KvO4odEVZlJiqgfTavGTmGzwqTTZzFtRGETxoHfGDo5mVfPubX+cnP/4J7733Ht9ZHFBWNW++8TofffwRb735Jpv1kvXyivsPHnN8cszZ2TOG9RqtSy4urnjt8UNizshVLrCRuFJSxgFF1H6837LRllYSVd8oZcQcgKSMKWdOcUvQC84RkvujzWq3CEEuQ9hTJCBnYqAm9WhJTF6kSJhFAJXOiIsayxP7XJKs0omfurBu+Se7RESEg5MUak1IrtPRU9YN3vUEJyKCQYm1QNbJ2YH/1Va4LBt9qXS8uUNMIa2zVkNpoVSOptJUpaIoIgcHDbM6BT5K43ykjIoQHbN5zcHBgnbjCV7uC7RI7WOMuFSHQEAWdommoFAyB/ngGcKAUkm6Wgna5FH0Tlr/Kw0hegbXcn0Z6HxEuV4MBiMcnt4nRiirhmgtZdWgtZXAVU2QrhRUjvLYgHOetl8lNMCLlIEb6Pth7B4D
Rh6GT/L+zg2pIy2MwYmU3MJoMmZqUeEUdMJQFiJPkNGLKclyn3Rp9DbIlOPP42hyD2RENW0hxnGRzm6teUv61pNxkYOMCXqXQ4PJTmNaP3Jgkd8zIj4JzciBg0HfWPcNapT1n25qeiyTY8pE3Zfe1C4i8yrbKwUbJr2hTyFxTBCOTBgpolfbiHAbaDBevO0x58cv/qYxRIqyQsfAvfsPeOedr/HR+yLu1a9aogJjLSqKtDBJVMvobW1Ka72tYSNBg0YmtBzJKiUkzlyOkH3kCSlIEGsmJznmoZKCFHUbDBbT7zmU39UruE3PYPr+EJJ/gt4OjBBDqi+DJxMqk7z3MBCjl+BKKZTz4CPGakpKovJUteXk3gnHd04oqlI6WLTBBfmJZkZQoooZo8LoCls01LMDbN1QzhtsUxONcEZcPzC0Hf1yxeZyyfriim51SY8j2JIhKDa9o6wrjo4XrDrPpl2z+fRjVuslx6enHB+fCiSZxLasLQSqNm6UOi4KafF13ovcvb7J0M5cAkEh2GnpzBPgerUm+MBqtdppwYwxin8PjEJh2ZdgJJ5N2sZiUqE0SpwvldagpbtBnHPVmMXn0S4L0+7MkmH/rutYLBYQI4v5nO9+97v83j/4/3F2dsa9e/d5/fXX+PCjj/npT3/Kd7/3PQ6PT7BFhdaG1WrJ86dPuHt6wrptJcsberQKW3llxVZELkYRbAsBgvCcRGdCjX31uVOGNKGK3HZGJbZBhp94JowByOR8Z7g6czn2N5Wyvf3J/0Ws91yqvEEczffLBI4Pk0l7/7Xj50/uv2EYRCU3ekqVyyYifU7wkgTE7TXd1ry3Yyx/192FjTEpiRGsVQkljWICWUrGXVYls/mMg8WCwkpgoBIhuvegTeDo6Ii7dz3X1y3OdUm6P5eFtoq1WifOGoz+LtmQMSsM3nZeRhJwell0Pa69ZnkpLd5RGdZtx8HJPQ6PT5jPFyhlsEUyc+taQkw6ImlcTH01cuup3FOCyk5bn7N9QOabTefCaWBQVsUYMGazwNwiWhRF6rhh1KWYlgNvXp/dOfhFj/N99C9iu42vtPeC8VBuWz/U5O9ftP8X/f4yx/ZVtlcMNhQWJVl0lOhsJGulDGkb06WLyzRTernPkQqL1PiyjkEzm/Hotde5vjrD9xtW10tC79KJl9YkrJhQKSQb02QNDNmvSgtEhhdNgpSVFu8LFaFQxWhiplSCGJO1e5QZN4lziVbC/oDNE+s04wkJBs59zi8KMKasfkFK0r78Vt9AxxTQBZ8WSkE3QnQS/JiE52lBoCSQkvukrAoOjg9pZg1DEDl5hyHqklAIoS/EiC/AFDXl/ICymbM4uo+pakxVEqxmCNKRZGPEDA4779DNklBd4M6fcj14Nv056yGKNHlV08wiwQwM65Z123F9eQ5RnDAPj47Hc1JV4F2gba9Hh9D5/GAMCpx34vMxmYDyeZ62dU7PZ34u13VHpCOx2bP7q1JqhHO3ZLOtnkLeppNXURREFM4FKf9oCYbygh0jO4vnPiEuBz137twhhMDz83MeP37MG2+8yYcfvM9sNmO+OODx40ecX8o5sakb6P333+MnP/kJWsP9+3dpNy3L5ZLoO5q6RIdsrKRRViDXqGJSkt0do9Njyo9jjKOuikrnLabSwbiQJGKeNIaonex2RIImOh3T2vz++bztOLYvSsJYk2N70b4y7ykTGac/U9RkGgA571CpjBZiwCiFtaK4mYM1pUSlM3uQ5O22JGL7fbZdVtNj1UpL14IO1HXN8dGxePoAm7albTdJr0EzBI1SlrqecXAwZ9Y0rNcDrnf00WPJnBaZb6UV3I5zXcxoQAw7iq8Zldq/PnnMVyiCHwj9mtCtaa8vcSHSDzIOQj4+NxCCJwyi2dF3/ah6m4OGXGLrh0HajxOpMQcW+wFGDibyT9M0N57Lv++c1yRHYPaCqtsSvH2S8vS63TZPf9ES/svgPWT0ISMX+d90sPmDxtfcxDW++DheFFi9KLj/ov18le3VulGSo6SO8hOVBB6wRTXkJ27hlslC/+KDvK3IpNCmABxKW3RRcnLnHqd37hOGjvVyw/LiQgSwogQc0o2RNC90TEJfFjtsF4rswmhTjddokqqaQUepfeoxIyHV6RJMFrNehvAfVMi14FtEUW58P/nZyb7Crl8ETCZqlCxaOTlVGmMEzs4lnC3UhzhHKuls2EZbUlyOCCemairqxQxdVTgUQzTEYiYBYwU6RIy2zKoZzfyA2cERtpoRTUE0lphExoTUKV4skkx5rJpT6zlel9h1RyguUFWPrcHWHt056AdRb9WK4AbW11esVy2XV9fJNbjCOU9ZVDda4MQl1IwB7XRCyJNkzo7yudzv2Z9mUUpJl45SitVqxeXlJWUpPfR58hvREnYX0DyObKqtF4VAxJmXIPXbPHGlRThk7tLuomOtFenxhw85OztDIXouX/vau3z88UecP3tGCIHXHj/iww8/4sMPPqDren78kz9i8J6/9Jf+Mid3jjEK3v/Zn7JqpUW8XS9ZNM04gZlBE7Qn6ogPN9sLpUy1Pa7xPCoZv2MA5gQVcc6NPA0hLoO02uySJfN5n57D/FgnNcNpoJjRkBub2p7T6X6n9890DOjMVZgsNFPkZF/QTCkjZTAN0Q0oAlYpXK/xQxLj6jraTUvww9hhMJ3X8jmbImaQgrs8hcQ03yiV9IEM8/lcHD+1pt10eNcLn8lLSbiuS4wp6YdIURiOTw65Xq7p+l663ExJMEbE4pQEMn3fSxnBGFRamFHgJ0HS9HjzuM+BuNaKQkPvB/rlJdGnuUEplhG8c6xXa0hW8n3XEVwrLs5ZOC2In4zzW/Lu9rxLmU/a2TVVVY7BYJY6FyJqltbOi2zibk2Q8cwjGP+NuYU+X1t1Yy7Iz+8H3C8KNKav+VVu+X698Xm3Pnd7APSi43xRcvEXEtmQS22EEZAISXnLy2nQ20Bjimx88ZbaOHMdK+aygRgZGVsSkBa0ajbn8OSU68tr+k0LfYfVikJ5isIk0S4olaYuS1H67JNbJNKhYWUdx6gkopIGrNH50qUFPYLScQw4JFOQVlvhu271NXYWouTiCvk8ZGbzzUBjmjFP9xGjtJWK/blJN/AuFJ0nM6Joi4SY2PnpfEq9SBEdoDS2rjBlQVCaIUScMpSVtLWZopL25KLCVnNM2aBMIXVgJWUBFwND8AxB46LBO0F8lFf4aBiwOFNDeUC5OKFuI2rjiXpFUVaUlafrB8wAwQU26xZPz6rtmM1m1HVN08y4c3p3tFs2xo7IgjZJX0VNv/8uJDydMF8U2GVVUWPMuFhuJ9ktapKvR24hni5aTFAQJpNZDFLTz3BaDgiL1DqcPytP8lVVpSDLcX5+nvhFkTdef43XX3uN997/AG0txyenvP76a3z88UccHh3xxhuvs2lbTk9PsaVhs14RFCzXa5rSUChF27Vjf72IBlmijgTyZP/iLE/q7iLzmE2mhpSVZlLlbmkkBb2p62Q/OMuPd9pCU0fC9HOnx7I71tk55v2yyDSo0XrbAXLb4jENesaW36g
ICS20xuJdT9d3xCiI2Ga9JiTzNWPMiJbmbT/I3e5/t1Asx5LQD6WYz8Va3BbCc1EhUBQls9l8FLEr6hlKWTbnV6ACB4cL6rpktd4QlZl8h0DwwlvLcpU5C86J39RGfNrWnMf4iA56jx8G6qKmNJa2XXH17DO5jpuWq8srbD2nmh+AtmIpP2zIgen0eoz3ndYURTb4ypw6PQkw9Hj+chKRA46bKNaU/7O7CKqklLv9++5CfQOx+JLnp5/5RdsvHIyobehwG9p343twe1B02/aic/HVDvNfALJR/vb/hHu/87/gDtIOhNqyZFVq2TDWoGzBmS34+58t+Ifn1S2Twz48lbKdvb9Jhg/3D9b8d77zRzjnOb37QHrTH3T0m5bVxTMRTgnSQaIRO/rKFtI7zpYgCrL+GkVCMJJck5JoWSPlIaVIZFM1wldKKYmYozjMBi0S41nsaX+yzhNO9ubQWmTYp74o04xsqnOQ5ci1VrAHewcn2iIZQgxB1OuMUgnFSMQ1lUiTRLS1hOAo6gZbN2wGjzcF9fyQZnbAbHGIsjUoQ9SWqC1D1Lgh4MIAOIpCiGguscS9d/heVEvxAd8P9EPP4COUc6rFHYrVgK2WRG3QhaiHSjuxGPSRNEJc6pfPHI22banrGWVqOx3PZ76GexnqNHPZ52rk1+csdyQ0hu2CGWNkNpvtLIhTSHd/ocpdCEprQZ5UTPtP7W3j9d0NxvN+MqKSv4f3no8//njUpCiswTnP9773PT786COePvmMu/fv8+47b/Mnf/Jf8emnn/Lt73yXJ0+e8MknH3Fy95T5Ys7h4SHt8ppZZfHOoffGmQ9OAtaEPE4Xg/H+mELNKnVrxO3rYpT35bJT7nYQ47Jt29/0ut1Wgpr+vo8e3V5Gub2+flsgkc934KaPyxTR2mbbUuqRToow8lScc6yurxm6Fp9al6WzI2zVetlO5Bn9ye2+Mo4iQ8gOsVJm2zkOrZNmSkFlQUdLSAZ/Kt3jbnD46BJhcsAaxcnpEetNSzcMY2BllCEShJSb9DXEbl6OKSMqeQwXRTGOgeyAPJXDrssSF6X7pzKatl3y/ImjnJ8wqAKKhuboFFvWDENPYdXoExWjcK2qpFx8s/whfJTb7q9t0nZTv2T/PtxfQFWas29DCF7E23kRf2NnPMWIuoV39DLb9DulT0Wpm6GLIiHpKYeUQHFMf/M7t/++wpr/ou/1L3J7tdbXgzcoT77zUq91wEUP9F/2ypfZIsqUmKJmdnDE0Ld0bUsz/5x2eSFuq9qI2JJWSZiroDSFqIVOzrP4m5jkY5LaeVGp5py1MhhLKRku1WMoJAtNDEoysywTSx5QMaESehzw0p63LbfscDPSdiMLSwtCTNhrfqlk3nFnUjdGI7qRGhuDqPwlUNxFWLUbOmswVS0lEWWomiPmB8eUzQJdz/ABeh8YXBDhoHGhjRjlcDjZ3+BQ3lOkczMEJ14KfU/brrlcLrm4XrPqHJ2HaEqKuqYdHC5GlDJoY1EmYgkQt1yeYRjo2m4kkuUJO9d2UQpbbMlh+0He9nyYG0HHNHiYPs7n1KSW2WkgMs3OppbLMgnmlrNtsD0G3fnfyfhlMuHlCW5aM8/1bVmwAmVZMZ/P+MH3v88//Wf/jM8++YR3v/4Nvvbu27z/0Se89eZbnBwf8/mzzzk6OWJ9vURF8S8qrMDOOCG3ujwpoxNTXQjRORgeg6zp90g/PjH+NWrkqNjEdwl+S/AzSo8eRDcmf25ObvuT+20Z3PT5dBZ3tmmgvr9t78ebWeG0nDaiEUqy7sIacI6u29C1HeOMnsdQcHjXC7nW6HGs7UPzW4GyLSqD2uWuKSUls/lczPl0dELCN0Y6MBJfxsWQGuDE98MWmtmsYb5o6C+GcYwao4mJPB1z98+kBGaUGeeU/cBuP/MlSqA5DE4s5pV0fYXBoauaGJIeSSJF1/OGuqmwVgJ7rfQoHa9HJGd6PaV9/8Z1myy043qaz1f+X2TEwXeSwb3XTLcvysZfFGjcfOEX//nLtt2Sx22ckPz/3XUBcuI9CTRu7O8lj+GWYP+27VcRjLyyqNefy6ZEYtsWJdVszoE/pVuvaRYLrs4LYhwSYhBH9brCWEpdYHTY6Y9WpPpyjDuXTAIPgT11KhjlCTgh20lqIopGSIyIwEiShMw3jlLpJgrJbQ8EWnSiO8Iu5LsfsY+ZKIHdYCT9q/VopjRm7kr8DmIUgDxkekkUtVG0ZXFwyMHxCRhLoKQoD8DMcMEwdIHWefFfSYtTxKfzoUALSTVGyfxIE5MhMnjpfnB9R9euub5ecnm1Zr3pcT6gjRVNjRBYtx266NFBYQrREcH7EdkahoHVekVxWY7qg6L2OSmTBE/0uzDw9IbMkGs2X4Ot+uCLti06YndKHNNAYxpsaJ0k3VNGnDsA5LqNQOh4TDECelvLz585nejztc+T/jCIHsM777zNj3/8Yy7Pn3N1eclbb73Fz37+Ph99+AHf+d73qKuKJ598yje+/nXmpeXJhx9wPJ9RJWfiGFL2HjNnKDk8pvJK7lYYx2SYGN0p6TqK2QMoZaRozcHBwcjZAKQNXWvpEJvUx6eIXf6O+2N9P3udZqzTv/m4u48vW0B4AfKxj3gZY6jqmRCmhw7XdyJMGMMOcTHvk4TuqNvahJXa+ZwcLMfJuRiXFbVFQIdhABVEjn8kdEqw4KKiHwa6LglXKYUtFLNZzfVqxTBMuCspsLAp6J0Gzjn42A829p12Q8gdJZ5+ENn2qBRe7FWgb4leoYqIDY55ZTg8OcaU4gsyRUj2PyvGLV/nNgOvmG6d7SWbLLLTWysykpHzGd2e2QkSsDfeXmURnSaFeS3Y7mz3ta8arIxjafpniUdvLdbsj68v/ay/gNuvR7CRLoC2BWXVgB9oFguOT05Yni8YLp+hCjXCUFml0yglnIJJgUYh9UiT1PSykZj8LUFYGeHIH45k+Hpk/42A/q2HOx3U4+QTIqKJtMvXyNki7MHICdnYDtAtRDh9bT5Ek9wUdfDJJyW9S4uEez1boGwJ2qJMiQ+GrhVxLkpDiLJQoCOFFpny4Htc7/FT1CVCcJLVGqXxvfAwNm3HetOy6VrWbUvb97SDYwgBbQvq+YKmHRiGSFFCWSdBn+BS14zYvmdk4/nz53Rdx+HhoRj6lSVlVaGCtL9Oz98Uhs8T3DRYuA1Byo+ni79Ijm/h24xyZIQlT8bOOenIyl0Aeuvr4dNioPR20ZbPvwlj5vfkBXtcFEJktRKeS1PXfPe73+MPf/hDPv3kI979+jd4663X+dnP/oxHjx5x9664wz57+pTFrKYqSuHwGCPOplqhC+ELeR9RQcZ9zna3C6K+EZAJ/0T8hyDV9VPZMbcdijeE3EcxSuli5K/EeOP67Gb96sa9MoW1p9ds+veXyc5yy/ht+8nGfdNy3Gq1Yr1a07YrGBwQ5F5SKUiwFh2TdT2B4BQulTum+8vffXeMmRFp2f7slazS4knMQXNBRBZ97wf6vku8CotWwgGaz2dUl9e4QdpFjbKT6yPIapxc5+nYy9dh/5rnLizvPC5Ecf
JO845FDBfjsIGQAqVuRb+6ws9qTHkwzmUjMjZRgN29DtN5dDLm9gLv6XtehHjtntdbxsLLBAJfsG2v5SQp5MXH86J9TJEISUz2XwOZapeTgbwM3MQ1uAUXerlj+fPafi2CjUiSUlYKZSxFVVPP5ty9f5/u+ownqwuMiQmCVNv+aiDLZuVNqh6j7M32gqbnyfTKEIg6EY3SC2PcwnRa3aym5aPVCWWI6Y2CuiD8irCdhOFmJpQfp9J/ykbkcV7slZ5yQqTMIgGWHG8OmrWWGmpUClvW2KKiahbo+hBjZ2hTEY1G24qoFH3oidGh8CgGVOxQ0eMp8Z6xnVgUIwdiAD841l3P5XLJ5XLNZtPTd5KF5XKIDx5bFMwXBxTlnMX8iIhldb3COXGbzRnWerWh7wexbW5btNZsNh1FUTCbzymqElsUNxaofB6n3SbThe22RW7/3Pf9ICqJe2WY7K2wWzNWZN6DCtsgMMQIWqOZoC5pYZkiWnnLwcv02gcfklukJnjPt775Dd57/30uL6+4uLjg7bfe5sMPPua9997jez/4AU1Vc3l2zlHziOA9Tz/7jNOTIw4P5sIhIOBDwEe54auM0KjszZKPcVuayjwNUU+MWwlkZOLLi+wodpal883NTpTbArwMC++P+9uCk3HR0TcXoOm/u9s2c9xHkPKW+RWCqLXEILpBVVWhVEBHg6ESpKNvcX0nonVxy3mZIjbZmyffm3L98zkUDs82wAps1muubOTq6orFzFLqiDeKwhY45+mHnrZr6dxAREvQawsiBSiR0p5dzFhvRARLx5A8igpIyOiQWk5jjOJJorfjLt9z06Arn5Oc6YziZcGjjaIpLH0ImAgqDKyvL8Z7fO4GTu29sb1Y584+dktHioTc3hJs6HFBjzvhiNZT/kJM5zTP1i+HbPwim1y3OH3ipYLe3Rfc3OfO7xFJjOPNtyhujnd16xm8fd+/irLIq26/FsEGadLSadLWtqJu5nB6h+XFHZ4/+QiDo4jicmqMnsC+jIqDAKiEAjCican+J0N1O6FuF/oxylDpBlRJbXGM0KdDfJeEpRPjXqGEaxF3o3eYWGfnQ8yBTIyTPadHOsuo52BECKNkhUKVo25DNBC1pp4vuPfG29x//W3K4xO8baibY5S29DHgifRDjw8DMfa40KGiwPhGK1Aal/wJMr8l+MjQyyTWdQNXyzVXqzVdN4yulqNsvbaCRiwKZrMDXn/9Laypefr0KRfnzzi/eEbX9yigKGvatqWskxGb1qO5VlBg3UBhC8n4gh+Rh6IsIGVSzjnh5Vgr8uBpEs0/LyKEZWEgrUWJMSMaosdix/p31k2BpCibOlIyxD3yeyZZuqhI7nJMpgHIFO1SEUpjcd4xDD1KW37rt36T3/+DP+D8+RnHJye89vpjnj47Y7VcMp81fPbxRzx7+hlXV+f84Hvfoe0dxeBZrldcXS/php4QobSGo2bGfNZQlYXIvUdR01WKsdyiYp7uRRxqO6mr7UTvPUMuQaUFNSZCNGzLU9OAY8pZiQjiN12wx3IF22Bli+DdhJL3s+a8hRS05f2Mz4etMde4uHpPXYuduFHip2S1QiES/npEakANjuhT6SiV7KYBkpRmzHbeSAvtzhFGkime4up6ySefPqGpDHdPDlBlgfMe53o27YZ+6JPTshKfprIgRI33mspbDo8WLFcb+q6j7cWnSCH+TiFKd8swuBHtMqUlz3wZcZNzDDG6kTtktCGaApWuf5a4tkbjoxcV5ugIw4YYA6FdcfEssljMUU0jejOmwJhExiUkUDhCatG/NUScILnCvU4lr8nfYi6h5HOptj9MFuB9ROyLtp1FPT0eyyZKjWP/q2w5NJjqYmiVjUenr4nj540INvl7qekLv3T7pQQX8caDybGp7QL6ktuvRbChlMKOUaUVVcrZMW3f0mqLqme4bimQcZSsUJeGQTl0xqFkR2A0g3cymccoKptKJ98TkxaOOC4eIQUK4kqrMWNxRY8jPktBZ8nxIaZWSW0AMV8KBMoyi46ljM6HZMu+ZWkDSThJeubRiph0+KM2iJNhIqp5UeJTeEypcHHA9wFPgbcVvqqw8yMefu0b3Hv3HczhAlcW6KLEaY33Ay4MY13aux6RLAeFlYUgeogD9B1x8PgY6Xs3kufW600qofS07YD4wSiiixA0cVAoZSlszeHxjAcPHvLGG29SVTXHR3OePp2hP9JcXF0SA+jCUdYzBudp25a26/BaWnr79ZqiH0TjZLmksJb5fMHh4SFHx0cYLfLOSluBGBO074N0F2hjxYl1JH+mhTUvhCh8PxC1eKwYnRq9lR4Z/VKrl2vhlccPEe3F3lwrkagmxlRW2AYWwYcxSJ0upgpFaQuCsaNniFKa6DzRiZolGh49OOWdt1/n5x9+xMXlMx69do/moKHr13z4wXusk8/Lozfe4s8++oymmXE5aPrBEaj52re/TzObQYSha7k4f8am7zloSmK/waTAFaTlWlnhPnmfOm9SmhljEMl1tQ2qQTw2sk+JngQMIQiqMkWFtmUbuT7T/eTFe6rJsCXmJtlple+TbelmioAoLQtVzN0XkyBRzu8WKdTaUpqCqALaCCqQTSPF+8ijihnaNGi7IkQtXA0vctoh+DRPANHjohO+WLKy9z5gkMDHh2x2p1BBWrittpydX3Pvbss777zFMLQUtUYHQ+t6lAgjE/2QpMiD+POEgNYwP2io5xWrjcX1jsFH7BDRQRxEh6EneFJZTRb8ECVIcz6KWm8KFtuuRStNXUkHYec8fb+md46iLKmqmna9ISpFXVYEHPQbtG/pzqF5+CbDZknwTtp1i0rmTGMQEbSIRsz+4gtK0CHuLWxp7pbzO/5hm+jp7TKdrv6t6Nd+2W6abJiEtmglJS99IzqU/apb9vtlW1ayTgcxPm9uDV0UUW8fT7/X7rr+xSv8bd//VTedA9LJR0Ymwd8kgX/ZeOPXItgAxnqZ0pqyrAloFkfHvPbm22zOn7D6bI0xikJLGSOQYEPldwaOwNp6+kQqnjBORKTHeeAb0SqX54mIt0vabUI7JiHoOKZUzIN2i5rECQEvZ2a31TS1SpLBSo2Wa/knplKJ0YZ8aMPQElBEbUCXVIsjju485N7b71Kf3sUcHuKMJWKkHc+3Y/ueBCyCHEXUuEgHLwvy0K1xbqDrBjZdn/Q+tvLNbvC0m47NpkMF6LqO1WrF0CcfhpQ5lUVJU9cs5jMODg6o6kI0JmLAlAV952i7jrZrsU7af5U1DL1j8GK8FCPJoEmQj6vrJReXl5yuVhwdHe2gE3K9Mhy/C8vf4CdMIGWrrCjHGuGx9MOwXcAgIUkSgmbrJB0loBw9XfZ+phn9fuYu5bEo7eQRwIMGXUp26lNp7rXHj9FlyaPXXudf/p2/wtHxCf+v/+w/p297FIqLiwv+7OfvoY3hW9/5rnixRMXi4JDjk3vMZjPKumIYOk7u3uH5k4/ZXJ9Ta7HIzgG9xNBxHNc7uN1LTF5TdGL6XB7b+Zzsv2bqn5JRqHyOvA8YE2+9X6YERzmX6VoFxqRhWl7L1
yEHFUoplDUobdK9nhx7IygMAZ+Y4xatC0CPnBcJOCSIqeokq+/cGMxEGXjyb4iEqPBRgRbzQkek6x1Pn53x2ZNnHBzWdIOjbze4vsMYCylQytYK1ojMABpmGA6PDlgtV2x8pDCFJDlK2m5DAGUsKumlmIQkDYMkRFVZiukb0iYrRmaC4qEcViu0NYkArDDaErVokhBETFCFgWFzjb84Q2lFNT/k4FRTNXOU2VtichI26ajb+fOXjq7JmHrB6/cRjS8rd6i4LdNsEY5bQgG1+/hl7gU9PZ69fe7/Pu2guvnhX/pRX7i9ctCR71cm69nevhRbdayX2X6pwUaMkTCsuPyz/zurj/8BxeEbnH73f0Axf7TzZWMMLD/8+4RhxeE7/2ZCAL5k3xNIy1orUJ4tuHP3Lk+Pjlk9+URKDEYlFvlAaYUsN40IQ9zG1LmefjP6ZYTn5IU3L9Ztk+X0b9NNpYAk6zOA8Di0Toui2pZy1OR7oiYTf8yfmUoo2ecaCZ7avsdjCEahZwccPnrM6Zvvsrj/kFBUOCU6AjJ6Ai5mP4uk4RHSUpoX42SA5L1IUg9DT9f1tG1P3w94HwjCKGW9WrNarYXA5kWRc7PZjO2qmfVflmLjnDkns2bG8WnkYf+QAFycX41fqe8daIU2hsE6ejcwOIcKajRqatsW7z0XFxdcXV3x+PFjDg8Pt5bmWm2/zz4rfw95UGlcxZj0SYYBNxHzssrKudPbG22nTDLJmva5CtNgY3/M5Pr+iLbkGhRyHL0bGLwnGkVRVbz77teoZ3P+y9/7PX76x3/MJx9/SrvqeH72HIWiqBu+9e1v8+7X3uX6eimW3O2GTz/5iNV6k4Ixw4P7p9y9e4cn3ZpudcWiKYnBMVrSv3Ac377tcy/CJKjef82242RX1CuXleA2jss2YJjub5+UqZSYoEW9TQgUyAJqdktY08BFJ/QQsq5NxCeURrxkJhA3uWsjIzPJ9iA9PwyOmHyMxFvFEQIEn8x2CQSl0IVFhYgLkfOLS54+e8bxyTuApphbzMGCwhjalVxHbUSvxthSOBMu0ofAyfERXdvxzJ+JZ1UMxKiFIxaDBCdKVG51Cqjz/VBYOzomj/NTHMZFJBM+SeVTbUQATUpp6TyF1Mm2vMR7T9N1lFVFXdXoskYhnWUhIUxa6xfgGq++fdGY3A82bkM5vigYufH8zrL6cqTTsaQz/u/m3/7CbmpbMXlhwKG+iDmyu/1ygw234dkf/gd0lz+nOvk6vn3Ok3/4v+XBv/Q/pTh4I028gfWnv89Hf+/f5fib/z0O3/1bL7HnLRqhVHJypaDtBDI/PDrhYrbA+B5jNSYOaA2F0clGO9duRWZ3f3LatoORMh2JdPO0qyfELon0vjzQuG3AhrBV74toIn4MMmKahBRp0tUKlz4PleSuEfXTPOHFhEREQNsZAKasaR485vitt5k9fERvCkLK1jSgg1iyG0jOvVkvwZHYq0mOut9qP3gvzozJUGm1WnN1eS00W22FuHh1jdYWFWC1WTMEj0vfd17PqWYNzWJOs5iLyJgCWxYcmSOMLbBliVafyAKhFd5vMJnwpi3GWwrn8YNP51Kuadu2tG3L5eVlIvYpZrOZdEsUdrw5piJe0+uUN601TITB8qSolJpMuIzXTxxFdwMXGZtbcmm+9tN6/nTsTF+ff7wTMSatFd4LU8KWJcoYhqD46U//hN//g3/MarOhbOT7mmi4f/8eDx8+JoTAg3v3uHN6Ste2VIUlRs/QbtisrkErqqrkZ3/2nId3T3j8+DGfftTTu16y2Iz6pYx8+p2nj6fnb/9cTs/HfhCWz28O6pWauG9Ozn+M8YbvRQi71/G2wJE0ftBb6fSpPsoNvk6Kv4OTQCuke200nyMSvYdcgiFprtCgVCeuyUn/QoIQUCrgnJiuxSgCglYrsaqPo7A72WhRaSlxXK82FGXDwaISOf/VFW3bpuAlmak5h7EetMEFQRiquuTu3TsMnePi+SWD78UJO2dMiS/kQ2AYhtRObsf72yXHXgkcHDGkc1eY1FKPBOCpFZioKaoSrVVyZ/WYyuIGIbMSHNfWEp2jPjjm8PQupiiIWGldR5LHF/E2XpZvsD8O98fqfkAxLaFM/84tgcet8/sesvGyx6i2v4zPx9ven5LcvwhbVneYBhyy0Mj5CjtJ+svt85cabPj2OW7znAf/8v+M8uB1fHfOJ//lv8/q03/E8cEbAjGun/LkD/536Vvsati/cFNIKh8l5lBeFmhrC5w3zBdHlPWM/qKli55SB0olXRN24poKjJNYFuNCMXIoQoiEHHVPBl9mVG8P5stviP3MWTKzXQGivChNF6E8+KNUa0YyntIKHWSKIuix7ilxh5b6KNCc3uXOW+8wf/AIP5sRtJXXBYX2ET2qJPoUaIgyYfQSlElpRXwvfFIK7fuOru1YrzdcLldcnF9ydb2kmS0wumC13iRkQzKj6aJeFFIqKcuSuq6ZzWYjW13KHTCbNRwdHdFuepkQ3UBZOikZJdEQq6zU0Sc50bRroe97lsvluH9BKUKCR2WU5Yw1Y4M70H6IdP1WeTBLo2dxLp+JilGM+LJa6H6GvH/9b9OXyL/vT3pa65R9R5EFjxFbFChj6b3H+cB603Lnzl2+9eAe/dBTVRUGS/RSTmi7lq7bUFrD4WLO0LU8+fxztFIMfSedPAZmVcXF83NM9BweHrG6uiCE3NrKjQV8/ztMs8Sd+2Hv+f1zMn2sJrfmFNWYohu7n797rqYLyFSoCxC9hxTITfUebluAVEL7lNKQShApnJd9oVLZQuOtpYgF0YSk6JlF4MDadMzZbiHId3TJwRUV058UnojRUg5x3jNoxcXlimdn5wQ/RxFo1z3RO5TWeAebtpPOrtJTVDOUNpRVyWrVU1cFp3dOWF6vWHdrNJrCWiIqdfJJ4DEMww0zuuDDhCcTQQUR40LQkJDGvM7lk3T/aMAYQTfFmNLRe0doFevLZ6MSatPUWHOEtkLilupWRN3i0XPbvfKq24uCjRfte5qtvwjtkNepnccvc5y798wLPjhvf0ECjRduI+iuuG0u+LLtlxps2MVjHv3V/znK1AC49Rmhv6Y8fFteEB1nP/q/UMwfYarjV9t5TP+LiFQ2LsGKJeVsTtUsaK8uicgCY2zqIhi9TWQTZrcW4lcKImIOPKIw7EkLlFK7AkXykphKLLtR8rj/F0Tb2+e2QjExweW7X1I6IYJSxJhb5mTq0wqik8nBIMTLoDwUFhcsqqpYPHjM0aPXCQcLrp1HW01wEeNEDMv4gI8Dg3KE4Ea78RjDiHBIoOHwQdrm2k3LZtNydXXN02dnXF5c0XY919cbZrM53oXRxGzoBrrY4pI7qdiuF2NmGWOUzpK0OGhtGQZHXdfcvXsXlKIfepwTQt2WKS+tmVbtKnlOz3kurczncym1BY+0qEp4HlI7ss78mXzNYhwXlxF6n5QCvKTU4zU0JMSdm8jGNPCYZvJ5DORgd7pg5nIAZO0F8EaDCdiioh88q+WKth34m//G32K2mPFHf/xH/PSPf8psNuP08JSq
rHn+/DlPnjzh6uKc1fKSxbyhXdfUhWFWW+6cHKKN4sc/+hGHh4e89ebrnJ+fU9q7wiOI0m0gbp63Bw3TIOM2RGN3rH/Z32T876u15s+5yWXatQbP5zpzMqYCXDFGhr6XxXQSZBTWCvl3gnRoJX5KZEHgpMKptNpyrLR484i6p8I58SqSY08BSx5XMS9MKhFrM99MbRVmkfwJBW0/EIPi4mrJp0+eUVUFs7qkbmYQSlbLK0KUseGCRxlDXddiAeAVV1dLYlAcLOYcHMzZrNf4GGRyV4reDSPPI7okIJauRVDiZKu0Ge9JtNAavfcMSYW2UGWS5xfl1OAdShnKshAELkaxdScQwoBfXdFHSTaWzwWBK+eHRGXGctX0Wn/Zdlvw+kXbFwUa+0En6maAsT+33whA9g7jC4/rBX/bv0u2adEtr/0CNP1ltlc9f+mAdta6G3SC/PgloY1farAhVtaNfH5wnP3o/0x19A7N3e+hlOLyvf+cq/f+n7z5r//vefbP/08vv+MoFupEESUqjAZl8MGiKWjmBxyf3uWzZ0+JcSBfsAwdMg42KYkM/UBhbss0dwfVdlDqMRO+QeqJ2zpkngDz89PFxjmHteU4MY6ZbAqezJgtS+eFD0hdVyX9jHGwZDRDlkYfIlFbXNFw/Pg17r31LnZ+yHUIOBQqSAalC42Kjui8nEvpH5B95kU2Sbh7N9APopGxWa1pu4HlcsXFxRWr6xXrtZQuUIZhEJvpth+oqmosa+QTnrOiummwRUHbdSitmc1mQvh0nvWmpZnPUKqlLEsWiwXPn1+AEvQjE1VtUUiXy96Wg5j1ep1g54GmaTCFGVUenXP4kFQScySZrxOJyDXt5Z8gE9MbdXp99183DUCnE1zmrEwh/Wm75P548l66GnRR0jsHyrDZDNy795C+d8zQdF0/Lq6r1QqFiMOVZclyec3Tzz7j4cOHeNfz4N6dFGA57t99wG//2/9D/q//4X/IJ598xIP7D3j67IyTo0NWqyustfSDLE4KbnyfaZvuFFXIi/pU0Cu/fvpvPifp26JU3Akqpudx97MVWedgeg2myNHYthwli7dRygbDMIxaEypGbMrKQ8xdJGDEqpeoNMYU0qptNMQstOaICW2SsSO+SAo9fgdSOVSOK4kKKiX3WkI7dApclEYC+yh3oA8KpUsuLpf0g2fWpKAvbIOsEKUVXGvN4Ifk2SJ8keVmQ1koHj58QLtpub7e0CfjuK5vqctKtDL2grocYBi1Fa0zOdDyEWsksGrbDcYMhAjWenSlMKXZ6axRQawM8J6+bfFuQNvt9VigKJoFUIDezo9TzaEXBao7ZbLJ2HvR9qLAYTqWp4nBbeWW/f3t3K+3fPSL7ufxXE+fH8fGzqvhxgqzGyh82ff+RbbpOcqJWQ4wMuJ7a7Dy54Fs5C24jk//wf+aYfWE1/4b/xuUrWnP/5TP/8n/AaVLPv/h/5Hlh38fbWtsc4c73/93Rs7BizYxFpI6ZAyAiYhEdEVRNSyOjimqhtgOWGMJQeqWSrE3ULfZ4zayzYMpC4KlkxvzYhRGgtj0Mu8LFm0/4yacLoNkquSnUka0DTrknkzyxiOaobfy6Yk7WJYV/TCA1VRNw+W6I87nHDx+jeLwiA6IpqA0MqgNoH0AFYjKJzRDulBC0qqISXm03azHssr11SXBB66vV2w2nUzcztG2G1arDdqUDEMQmW8UXTfQtvK6HIABooORFsKstGitpeu6VONW6XGgaRrm8zkHBwc472lDK4ZRA3gn/g63Ld4+6YBMdRTG7HSCJnzxzapGgioqlV4mdeUUhoz3Vl6Mv2gyyxbZGb2YHjNsRcimQU3UYG2JcxFbljivODo+5f0PPuKDDz6gWdRUdcFrjx/z5OkTFtUcaw2Kgroq6Taaodvw05/8CGMMd+/eoSosfd9z9fwZj/9rf5k333iNDz/6GH9HFGBtVRFXehQ2c31PWRZpTN5eCpnyJkCkz/OCdVsJaZ9fsV+Kuu3aTO8XY9Tec7sT8PRH0AoNqdPCGiOTZ4z4QUjPMbVEE0Ud05gCUxQYG1BGDM6KwkgJTQagdG+FmDRkMg6Wg588j2zdg30Qo0KTSjNWOSHER8XgBypdUKTyQu/g4nLJ2dk5h4sFMUDftsQIxhoJyLXC+UjbtaBN0r+IKBUI0VGUFfce3GO5/EBM2oDBBdADOEepNTqE8X7I/24DOxGxazcbtNEUpXR3STAJ/SBop4zfiE/fuSgKFAE3SFdbZaVNv7u6wPuATtoj1cFAOZtDYZJ+jdkZHy/avqgM8uex5aDvy7adbzQ59FEnZH+/L/PZv8KAY3okub31l3Xuf+nBhu+XfPp7/yuG649589/8D7ZBRAwcf/2/y7B+CkS0rVG2RpdzvvQUxyTDrdIViqkGq4XEoW3B4vCYejbHtytpt/I9OVKcblt4Nt1cehtsZEgNUiaWA4yEbOzzLfINOr1hw+RGHvcDNyZE+Vpqb5LNdewtT0RypxTvKnBB0AxlZYHvAX0w5/iNN5k9uI+3Bb0XnQi8cC7kuDz4ARU8PgpbXoINKZ9E7/BuoG03dG2LVtBUFf2wFWlyiSjadfJT1Ta52tpR+Kjre4ZEws28B0FqpGk3xIjzjq7vuF4uiQFm8zluGJjNZjRNQzNrEMM2w7OzZ2JlrxV9P4yw3m4ngU5dAMPYqTJ2QyhZ1AygkxaGD34yNBKihQK/JwC8gxjujiPxdjA7xwJbw7b9CeG2YGR/TGYeAyqKRoRVtG3P1VXL+fNr/tE/+ses1ku+9Z1vYgzU9RFVWRKJ9F2HUnD3zgnRD1xeXAARo0rC0IPRNKVlcAP/4P/797l7ckyM8PTp59y5d4/VqqVu5vSblbQ3An3fY22xM7b3Ebv9rGvf4Gz6XadBiARfBqVu796RczzNPG/eQ7dlvNPPypuI4G05Id4L0TiL6WmSvHeMROdxcUAFKZtIUO6JvoeQBL0SUXNbMsnjaBt4jPMCEHo3fseuc8ReFpuitMKnACEAu0jXezabnuvrFUZHrLZUVYk1iiiUbtrBoWOkKCpW6w3aQFEYETFTIqxVlAXrtpVBbLQQMnU20tu6IGulUldaTAigTzwxCSaMMRRlgVJa9FLajsGFUX3We4+xBTq1uBujwXspM8WB6Jx42miNc4GqbWkWRxRNTTObSTloUg67bfuiZO7Pa9O33Me3bXnJGn/Z+ePk8ZfEDvvzxlcJOF7lvOWEdz/Q+EXO/C+3G8X3fPJ7/0vas5/y8F/5O7jVE5QusM0p1ck3qE6+Tj6rrr3A1EecfPu/j/6S1tcYA91GLK11NGgMgw44HVHKC0SKWBo7JYQrQ0ww0E1YarvfZKo2+T0EETWKKdiQgGSvIyVuXzuFAPM+pr+PrV76dqfS/bJNTBCvSm2oOmwnBBBotnMOXRT0IdAHz+LkDidvPCZUFYMTd1YVAgwe5T0RCTAIXpANtWX0B+8k2HDJGrsoiClA0Vrz7Oz5CO33nbS9xpgcJmPWr5DLKlocIl1cVCWmsKJT4aR1ddN1aGsx3tA7R9d1lLYcIej5Yi5
eG1rx4IHDhyBBydUSEwPaePC3w4mZiJrlp533mOzlofVIzFMTF+D985+ho+lid1tJJGevRu8GG/nxfrY2HSf7178sy53nQwxSNtKIQiuaTz79jB/+4U9YLlf8q3/9XyXi2bTXBO85OjykW3fMT+9wfn7OJx9+wOXlJcZo3nrzDZq6pi4tMXjJOjV07ZqDoxM+P3vOpl1TFAVd37GYzfAxMriBqrC4YUCpbenkRZPbNPgYJi6y++WXaclFvq+URjISlX+mZZXpPvq+33lNPqdf1ma7c79P3pv/brVBeUdwgcELQqCikTJLIlEH1yV0w6N8D27r0qsU0v2R5rFc3jLGjKaQEIkaIoGQ/RGUwgVpoSWCVoZucFwtl1yvVhwuZpjC0nYbwIt9e2Upy4ow9CxXK9q2J0YoCktI37UoCg4OF1wvV6m0JQq+KareGYPOJyt6vRVSs0Uh8wASCBkl940xJnX2uXFOyO2yPQoXPMYkj50omh8Oj2/XbNB0vceuN5TLJeVszvHpHUAC9Lqux+t026L4osX1L0Lg8YXbZP158Ur0ZbtQNx5P0dRf9jYWc1RqTpj8G14wB7zM9ksMNiKXP/u7rD/9AwgDn/3evw+AqY658/3/MYfv/i2mRjaHb/1rgm5kltQXbM4NXJw9A0AhwjpeR4JNuhptS3QOW1aUZUGIHXVRYE3A6MliMpZF4sgUDzFASF0foxqd4Og5m8oRqdYigx5DvHHRX1TPmk56ty1c09fsZGsxw/d6PD0hpoutDH0AXdXcv3eHw8evY44O2QRAWywaFaBShqClD34IHh8GPAPBBYKL4i6ZjJ6idwIIK/FgGHqZzOSkbWvRU1g7hDhmOMMgXImu63bg7in6kydhkJbVsixp6kZaVo3m+OiYru8oy5I7d+5QVCU+eD54/0PiZk2MENRuvXl6DaqqGssxwzAIZyPmlsqta+N2Mcu+Ndvrns99ukgyWUzarpXW6WeLiO1fb2CHlzENRqbPT8stkEoqTrLAiNTnX3v9Df4ff/f/zXwx5+vf+BZf+9rX+fFPfohSiJS5ivR9xx//9Kd88OGHvPvWm3zzG9/g7OwZF8/PMKcnVHaGimC0dNX0XYv3jvms4eT4mPPzC2bzBT5Ithxdl1Q/pV6/DQ62qFuefGRw3t6Oun+NxoBqDFyyVkWYTJ7b++JmML4XgKSJPJfJjLGjN8mYKubXpuOfIpLa6IR6KHAyH0jHRRIKc45uaHFDj+tbgu/BeyxJrpu4cx8H74X3k47XOeFkxCQFr4wSOVAt5U1R8xROmY8GHyJ9NxB8pK6E4+SdoHBVLXObsoreScmwbdcoZdBojNU4pfHpe9y5e4ez5xe0bZfmDlBRFE3VJNBKlaWx5BeLsFVQVduySnTSURN8EO2bwREnKK73DlsUyQ0ZysKKqKKPaJM0OXyHaxUQWbcbAsKzqZsmBd2JaDveU9PW2N2kMI+D27ZffYkBXlZbQnKcXzwouA3ZeJXtlc+J3FgScMRtU8NX2lfafqnIxtG7/23mj36H4NvxOaUNpj695bVZX+PLT5p3jouzp/KltQVtMUZTlAXWGEK/xq1XKKvxWhF8lEnBKGzM0qupApMmHR+9wKdRi4OnUknxUwoXOnkjbEWO8kQn+hjyeEIaShdmDBKmC3PKkCJu3JfOLakqd0akBT3IZxodIR0jGHxQBFXQRgXVDDWbsXhwj5PXX0MfHNBGg0cyHHmrT6ZpDoIDNxD8QAiD8E+8ZNDe9WN2orVhSJ0gEcumHWiaAwa3TlLfBdZWEDti1IjJvARAzg1s2g0hRgpdpmOOmKiSomFScgweW4hC4Ww+o5k11E2N0pq6qdDWoLtOZMWJPHz0iOVqTftpj9aadt2K8ZmIeKayTCYtCrnP+UA/eIwLWJJ8uJJ2yBjSFUjcHKPtdgjGOGaz2y0tqCoFfUo0UnyEqCJRiZ9GHl9qMi6MNhgj9vPa3FxA87XPrYjeewlkbMH7H37E3/hv/S7/6X/6n3Pn0UOsKfned7/Hn/zpH3P27ClGObpry2azoqpnGB1Z1CWbzYrLy+doFbh375TFbEZR2tRpFESm3znaqyX379zj7OyC5WrD0cEx7abnYDFnde3ABzRJ6TGm7oEo039AbUWuABGQiviQ/FHIE1Ic7wO5FXaRjfy7n5QkQibxht121hijwPExcWnCdL8SJFtbUtUVTVVjygJblJRlIQqZivG+1EbJcyKZi4+BaFLwqBUmIX7D0NGt13g34PuOkILyYCLKqlQG0igt42DwXgjsKeAZieXj9KEgKnRUCQlB5hpFcpQdwCpModFWJT7VQFPXHB4eoAws12uurte0naewDYUVsT43BJwX5A+tmdU183rGZt1JMIEiBo0yCQlCi+EbSnREiMJrKSy5tFWURdJTkFmo6wdQGmOT9LlSQqRN90lZVHRtK5FNlHtUxYgJHnwnSZ7riK7F6wpfN7jNgmBNCtQsUeU5UThkOhP5tycxjbmbC990AX7R87dvcufuL6o7r9hJtBiTr5f6jNvW5viC57/g2Kdoxhd99lcNBna+n7Q/pbnzi8/Ny26/xGBDoUxJsXj40q9/2c0NA2dPP5VFsbAoayiU5bCaCRHJDfTdNdpqnAo01qJjpFIGYth2cpCRipy8pkxKZW5E6pFP2axKLMF8ekNqSc1Syjb7mWSonO2FH7kaJCjdOwRMBZTIXMcUH/tU942A8pGgAoVJyoVRoWyBx9LFAjU7oLl7n4NHj2ju3SM2Fa0CCk0ZkUnSC/veuw6c9OpH71HOo32Qyq/3+KkjpLL4GBGpDOnfr+oFQ9/jQ2RwAe8VRhcIOGHT4hKISshu/dAmw6gSY6XOi9IpIPBEJbBtrTVVXXF4dEhVVpSVZDVoqKpSznM6O2VVcXJ6yvnlBVfX1+MCH5V0GrkgRlrShhiJyuCDp+8dtpQWTqU9Wokpn3NyXUZ9gLEDJYUKSYo826qLNoLA/WpEtQJBg0ZjVc7GQCO+JiOKBQke31XHBG48zmOKCOvNAHrGN7/7l/iP/5P/jObwCKM0637N52dPefTgHodNRXAtwR+jioKPP/mEqrbUtcXoSFPVFIWFGPHJOCwScYPDpPbqWVXTFCUfnn3KYnGEtZaDxQEximCUtRqlRMo+vz9PsiGRK/NCE5JMe4hhZw4d4+302IeYWiUFdfR+oB96CVZSrZ8UdEh7qceHIKrAwUlAQApqglynvh/YbFqCF2v2qiop6oZmPmc2n1FVNVVZUFgxWCuspShLCps7Z7wgGlE4ETF6+qGla1tUGNBBLOfly6itMVmS5UAJiTxEKTvI/RFHFCtKqxMkZFQlfod4kkgpGOVROqKMouvWLFdXnB4fSgCuFSrtS5o9NEUxE1JmGAhxEFNC58TrRFsKW3JycsLV1ZJh8BgtvIrCQN9vGJyjsMU4dmOU0o4xeWGR7i8XpJssIhYH1upxDOS2YRBkpN9s8M6lwEYmtUJrBiflJ1tK4Ojba2x1iOlbNhfPIQg5t2zmaJsUYKOUnEwK2GCXyLp/D9224O4H9i/ebmbt+5+1/3
m3RQpfFtSoFzyePnubHud+qf1Xvf0yAovbtl8LbxTnHGfXz4VMZRRYRakKwvyQw9mc6Af6vqWpdPISAVJ9STGJSBVbFrES+DTfLNvcVI8ZlmbfgVLY19kSXCWSVYaG1d6isj9ww8gPUXgfkzx0ynbG0kPADZ7g081i0oSqDeXhMXdef4vZ3QcUR0fEqsIbQ2k1uhAosw8iNNT3LX4Qi3g9Gb4hJk+HIFCscx5BcgfpKglQFCVd10uP/HKZHCo18/kcpQvOnl/hvAQOEY13w1i+sKaUQE2ZkSS6XTjkOLTWFEXJbDajMKXoHqTsfr1ZE6JCKYO1BavViqIquXf/PlfX1/IdggQNzjnJ6NKiJM/LgtX3A6br8WyvSwI1ZJKMAYu0PmqTOn5CHK9znKySHr83ccVxWtjh8UzqnPJBaoRQp+WkKY9Aa701eFOi6bHZbHjzzTeYL+Y8fPiAP/mTP+Hk9A5XV1fcO73DvTtHtKtrjo+OeeON17heLjk+OmK1Wu2UL1SUDoIRRUHho8dozeA9l9dX3HvwgH/6oz/i8vKC49NTVpsNzWLB1eUFJmffkbHUEVMwEeKWJxFCOv8xoLSkbNNMMN8fU78TpaSrQvg8WRxLRNyGIVu/u20ZBwksBe4vEjlXp98BWuEBDY5N2xIur1DGUla5E6qgKgsO5jOqsqSqKmZNTVmWo+dIlt+Ow0CfWotNSkhiQrckYffScq5CyvxzeUkWRh8DhAm/x2zLdiQEkxSYGaPTnCT+N9aadMyWqqpo6ooQHJvNJqE+msODI9Carh8Yug3eDWlOyiiEjMHj4yPOnp9z9uwc71KgVhi0tjjn2XQtVVlgCwlElFYpMZMAuu970IYuaZgopUf/oGkpMF/boZeSjdz3Ul6LqXynDFSlIQTFupMkqFtd0HYb4YEB5byjOTii0bUgyCoS0QiX6sXp6X55YRpovMyWk9Bpifu2f78i5eLltxRIw7+YoOKLtu2auYtq/CI8kV+LYCNEIQrm7DjqiKoaIFIUls71OO/RuqSZzYjrSyKKYfDsKk1PBpTOKoHbzDRvcQJ9xhh3BoDSW6GvqabATZht/7HCh5TxRvnsoJPaodJoBL70ZG0Qi7IlqmzwtqI5vMudt77G4t4DvCnplIIQJBAISB0+eJx3SFalUdGKvLfacg3yOZAFAykbGTkBbScEvFJXdL2Qbo0tILHpldbYQohm/SDKhiFEvJP2yRAiRaF3SpQKcTztkx29tQVlWdE0FXUtVtTey2S1Xm+4uLwSop2WMgxKiUQyisX8gCfLNc7LIpTdXHMpJe/Hh0A/DOhBZNG1UklDQ8IuQaQE6s29Ppjk7Etk+wXUzvXb/pDKA9uxEmH0scmIVYwJhSG1Yu65+07HTp64h0Eyw9OTExazhsV8zpMnTzg+POTZ06d87c3XOZzXbJZXrFZLfvyjHzF4UXy0thDibjIgA4VNcHzfJ10VpVFFgYrgAFUWXK/XlBfnzI+OuF4uOTl5jeXyOvE2IkQ3ljWmvIxMePY+c3GiCEch1yTzXyIkJdpedFdkdca5YVSoDFECyBACXZ9ltBN5EoXSBUeLQ7SVcolwLUx6fY+xBVeXV6w3bbp3lYyTtaA6WivKwrK6LilTG/ZiLt1PZVliCp3a3kHl8k1CnKL3oj6biJwxKHGJJZDRZklgBLWIIaSRtE02xutNRjGT3g4hWRCATvOCTfPKZrPBDx1GK+q6pqpKdFEAlm4Y2LSdjHu3ddbVuhAkwsN8MePk5JjVaoNPyYsPAWOzw7CYLUZN0h1LiFEMgKbvekyRLe/dSOLMHkI50JD7c/J87saC5E6rmTUzbFnQtR2FNuLG3a3x/YCzltW5ZrNZy/e3KokECjJ6u7Y3O+d2eo6nCd9LLY63cCFu+3dK8Mzo1Ffd5Pbce38c/7f7ua/4Oa/y+i8KauRw/kKWUX51m/eey6srWYSNoqwKqBrquuLo6IiVhvXmik3bcnh0zOXykqAZCZ87Jz/X2qIoKIwa8BJWyktiIpHGPInI+zJB0GZtgHSzTfc//X0X2RDYVym1Ja0m2FWrmHOn9LvGVDPaqPEUnD58ndM338YendIWFQ4hw5VFRQiBtm2JJil+euFmxBASLJrLPFsFzbwYD064CcZYqetaI5oQIDc5kbKqMbZk0w6cXzyj7/yEgwGDy+21YKylKMRFUjYpVYDwaIwxzOcLjo4ORcK8qlAofOhZr9aECKvVaiznFEVJ3TRcXl1zfb1kcD7B7dL14JwEV857+mHAO3l+GBxad2g7JXamYCNGQmpPDUG4MuWYcW6RjTxYxklzZ1NbO3gdUH57zcVZM4wtjaDwKqCj8EvUpMVzvIlz2SWKU3FhDZ8/fcIP//CfcfH8OX4YWDQzhsMFZVUyDAPz+Zzri+dE7yirWsbBZiPeKlpLB0LqHvBBnGnLssEUms+fPeP9jz7m40+fYIoKXZa4KIFC7x2eCFpQIhW9tHzmbpIcQERQKqbFRsSjtDWj3XuIYXI+RYK+d4N8Z63HADRfz74fxk6noR8wphA+T92MxnqzeTOOuzhyOgKzIKiZtSXh7BnrdSvES7Zgt/cRpxyrYaBNrZ9du6GqKtFCqSxlUYrCqIxYQXSCT55BiZisRMnXB9AhonXEaIUxEYyMdq30iHDlEplc72zcvb07Yi4rIYusNoYyHVMInmHw2Loaz4G0vovOxdHRESsCm5XwZfL3HRK6Z6zi+PiY6+WGi4tLtFL4MFBaix65FlGCuhiIqX01I1MkAunUj2YbYPqdBcgYcYaVbr4tWiwSAiIapiOpzAJNU4HRXK6WbPzA0HfMjk5ZGYO1Gn1whLI2kff1WMq7bbst4Ljt7y98P6+ObPwigUZ+/81yyS6y8VUCja90HC8KIOQm/6WVVX4tgg2nDnhW/DW5IQtD01SoaoFVd+jdKYPteV4+I/qek3nD89nHPCNQKbFAPuOAiwcP2RQDPzIPpfUxhrGMkjUthKOxzVw1KrWeZk+SiHE6JXCSwe9cAC8oibTA7V4YhSYgAYo2KssAiVS23trYZ25VdCW9shSzBUfL+5QfHdJ/pPDaoozFaINFJm3vC9A+lUiGlIXJpKEVsuAkQaLg/Og/IhoZOgUbUWq42tC13ZixZPj0er3ivL/AE9kcdKzUWmzfnUvFCLDW4IoKrSQzE4jbSACC4mJ9zNPLYxb+AGM01bOSmEofy9WSGElZlAQPWmuc92za+1wNb7KMS4aDnqEa6Ot+5Go453GDkwnTB5TzeGPoqwJf2PGmHaywZrJwWobkjTUYZbYlt+lqML2Gk8lHatxSgrF6a/BltMZihXgctBABo8ZEI/GsUjsts6iJXkUKhK/dhqc/v+APPnqfz589INz9W3wW32ZdrYhhgSai55FWrTBKsQqMi4TWUn7yWrPebJK8tBnLNZfXV3y6fELXfAv1hnRLzKqaqA3r48fMZg3Xi2M24RLXd2gCKvoxy9kmX3IeQxTzOLEIsInEGXcCkzyWvJcOl5jbLFPwGHxAeU8VGYMkY2XxL8tqIsgmn5vLnCqIJHfO2E/bj
nK55Or6miFl2+PqrpAgP6FMSmtsQvuiUgxagS3wqZNFgv40BCKjqZkayyYBjzw243hK/Bwl80MYPkZd/Z6U6JT4h6C2LrPWWEiiermLJYvf5S3GKAiH9xRlidIGFxCiPIrBewYnwZoLMfFYRJE0tgOzxQFHRwdcX18JghoN0nKsBf0E6bbpPKHQEGTOCDGO3jLAeA3Kshzvg4xo5DnQe5lPClskfomIByql8G4QUr81aAelilgLg4n0rqO7OsNo6Ty0RZKTrxpsrIlGuFE35tRbEIkX/f6F21dANl75M2792C9HNn7hfX7J9qrIxi+y/VoEG7F6hHv73wWEbN0BF8BHG2Cz9+JrYLH33Aw4+aofjuDNf57b+3/On7+/lcDNBqMv3y7Tz1fdZunnlk0hniUGmFbOpn0lwy/w0V9py/UVkSZ4tS2P14Wc7k/S44vpa6ov2cf+fQBwH46/dvvLA7BMP9S3v+YX3ab56ZdNPp6bt/eXbQtu/9q/qu2LpgfTfcbdn/17VBe/B2SisRoXLpEGV2gMMQqZ2VpLu9lwdaUprcFqaSMFUtu7JC2RgRDg4uKKvuukJKgFWRq8Q6sCW1rKsmC2mFHPGtrVGk3S9nAR7SRAiiHgvMOYkqIsJfgIYj8/LRHnwGN0gw5hFNBrmgZrzYhy5BKzNaLxEr2ge1VR0ocO327Ae2odid4zOOiuL4ht0uIxlsWJAVumoMcLEvyS2f6XZeJTnt0o0Z72+2Jl0C/e35cdzzR7ySXY7d9SzBN3j31/vy/73W87ri/rshmPIx3MLxtTeeVgI/qe9tkf7nJmU5amEzxZGKnLz2ZNkgne1m99SLXdGKnqivlsRqE1zg2sliuulkuGIanOJegxpIxKpwyiKCx1VTGfzWnqGucc680aYqSpSrp2zeb6OjG/A40vOOoanA6cN6sEV0EGrbbXRk2D3O1rVP4tbp//kotxc1CosaabM7Rxt0rgSiFFyet0UWHrBmVM4gOIkZKUX7L4l2RqAmn6sfwjF0r+t+UVxPF9+e9iVBYTJL19r05lIqm1bw2upN1NJ3fRbrz3xjLFOJnuijJlFCFnbbaw4361Moh1tRjC5Tq5Uir5mYjNfEjiR0Nyoh1NsIijm2uc8ChA6vT5OuUS2BQy3YdO9yeZ/eseJ993ep1v22ceIzmLvi0Lm97c+9PAcrVEKS16IUkuOncPyELlRxg8EwLHy5722LadlMuSRfl2i2m8SfFOJcQleCGPNrOGwlohFY7nQfY7nhLERVdIjmactLfnaPJpk/MmuiZ6ez2y1w/TxWF6z+0c9u68s/M3Kd0Mw0DXtuP3g60kfD5XWYY5w8Tbh7vP79/C+4c0vfZa7f7rF9/GVw/x1WtYm3RetB7vLQFXc2liq3NhkoaF957mcMG8roX/EaU1tagqUAVt37NcbairWhyLux4/DMSYSrsWUJFNu0EZzWw+Y7Nc4WPmLevxnhaCrE6oixbyq/PYYiuxn3lFMcaRID/Vipnec2MXTpTysNZaVEtTl0lA3GeDc6ANNkZqpXGDGDj21xe09YyirBLPrE7ncPfcf9m2/5r9xXYaSN1c+G/u/xcrIuyP5tv39rL8iFf9+yuVQL7gHPwi2ysHG759xsd/999icMNoaGUKSz2bUTc1x8fHHB7f4c1Hb/Ltx99iMZ9Jn7p3BD+w2awIUWrB3/j61/mt3/g+d5uKy7On/P4f/BP+wT/6fZ6enbPqe2IhLWyt87ioKKua+azg/t1Dvnb/Hf7yD36Lb37t6zz7/Bk/+ZOfYJTnm2895sM/+Ql/+Pf/C46swvqWby7v8Tc+/B4X1Zr/5Dv/DGsUJkGeRgmHIhsRiZNIypTzBJJuFiUqPAKxaiGU5W3/wux7DqAM2lQYrSiMwmjJT7AGjMUpA2XDoCymmXPw+rssXn8b0zQMaFRdEY2hKEsphwwDpVJEN7BeXRJDmxj9kstPOwC6butXEhKXwDvHer3GDY6+H6TOnSL9sqipqophGLi8uuby+pqrqyXr1QZra5bthp9/8L6QzozCFpaisNjCYG1JWVTSMZDY9ZWVwPP+/fs8fHif45Njrq6uuLi4YDE/ZLPZcH5+zmbTsula1us1PsCq37Bcrjg6Pma5XOK95/nzC5bLJavVivV6zTA41qvNaEwGQrIMwVNVifhnzBjo5KyrLCWLm4ptVRP4Oj+3z8cBktTz9jVGa6wRD5TSWqyx2MnnlGUp/heT/eYJecrd8N7LYqoVP3rvx2zaluVqQ1nWhCj197ossSrSrteURkPq0HDOi0aB1mzajvPLKz772ftcXF+x2rTEIHwS0TOQwpdKRFzheBhc21JYzbe+8TXuP7yfNCZ6+a5pocynI8TIerUmuoFmsaCwBZt2I9wfbaQ0lUiTWVXVGpuIrJK5KluKfov0j0pQHXJJUU+CeskKQ+IJ7OqgiIZKREo1n376KZ+99zOA8Xq3bcv11RVdt0ETqVIpIPgcTGmcJ+lm5H3K2JV9C0SlkncSKpc/JbgojaEpLVVhqApDXVqu/6X/GHf4G5Ik6a0vzw7JUKnUURspjOXg4IC7d+9y0BQoJW3KzjqIflzgWa0pyhpbVsxmM/oUnHTdMJbQqloTkuZJN3SUZcXp3RM26zWbi6V4RhUiC5D8Lckt6t6HcawMg8PaSeCgtrymHITke0KusZRNpX1WSLJKRaw2Y9tx8DLvFUbar71z0opsDD2RNjrCZsny/HMCCCfn4BA9nwnxdcJl+PPs2IjEl/r8aflhGijnpOiXVaK47XO/6nO/qu0rllGEfDjWv42w3iHi/cDQD2w2G87Ozlgtr/FuIMv+dt2GojBUhU4Ln/Sa6yitegqwVlNREmxBVdfoEBiComwamnlBWRXYwoymXspm+3LPEEIiUgV6h/gd7Gd1EW5kTWnbgZLSgIohiFfFTra26+eQ35szvP3MWW7GZPZmQGxDtNSvNQRtGND0GE5P7lKd3qFLAl3aGEzUhCFIkCIxD/2woW3XDP0GqzwqCvkKBT7pBfhxgg4jYhBixAUhyA7e0w0DLgpyIFdxoHeezUYW++vrFavVmtVqg7WBrkvaHFZUAo01KLOdrILNSIJBKam/27IUkaAIV9dLzi8uRFyqazk/v+Di4oKuH6RtMcKHH33CbDYjRnj27DnPz58DsLxe0vc9Xdex2bT0vWOzacc2XmnfDeJGqwcKTOpSifhEUi0Ki6hRZgfWJCLlwpjs5iBzRGkUI4JirUGZHIRmYzjJEmMljp8hRGwQDoXWApHnbNsmYqqJMqnLoqaJCJqjNDx+eJ8f/eSn3Dk95b33P+DOnfusl0uq4xNW3Zqh3WCbBqLU632UvH25WvPxJ5/y8aef0XaOdhgYXEj7l84jTUiOpEl4CkVwXurxwXN5fslhU6W28KR5EMTjJiT0SSZIg7Ea6Row1PUiEYotRVkI2VKbEdnKKJPcSEL5C0qjtN2iHDEvIlMbgIQIJvEwdFrMUmCQg7dQeuq6pq4blFJUlbRXG2NYr1dcXV5wfXE+3p/ZwE8WTBGoi+nTtDbSs5lrYSmxijov
uAFchOgJxqOVEEVDsa+InF6b+GCByZyAyP6Tsv/j42MePXpEqQPBdQQvLa9VKW2wSimi0mhb4kNgvdnQrluG3mGLRCItSkKAfhCNHudbilJUOod24Enn6dabhEKpJIwnY1v0YIQ/EhKCkY9tGmjkxdEYM3agGGso64otaCRu06YoISbhuyAlEwXUZY33QZCXELBFRm0VXd+yPD+j7QXZJHqCitTNbEQfzSTwuG37lS+i8eZnvNqxbMsku4HTLyfo2C/DxBvr4L/47dXLKFGMtELwUoMk6RQYyQJ8WqTOLy6o64qqyFmgWCq7oaNpaozObVQGHSM6yo1Lqs1Ftp4iMQR8DARSD7dSibjC2JLnXGB5fcVnViZOU1isVmjvXilqnEbOikTeI180WXSyemKIcbIoqfExbDPYOL5GJeY6ZKts6XjxBBROa1qgmM+Z372PPljQGUNpLcYUonIaIrQD4NHK4/uOvt9gjGLoejl3aXOp48QNHmstZVnK724Ygy0hioooFkBRlpSFZIN9Lx4rqKynERmco+9X+ADWFoTx+ybpcp177sMojxzZmpM5H1it1wjE31JVFcEHLi8vWa3XKZNfg9JcXFxwcXnJfD7n00+f8Pz8nGHo6bselVjzm80G76S0IshAxJiJQJaSbpkMyefxlN1nbWFTu6OofRZmezvka5nRiWxcJeUdCIOck2y2ZYyhsJa6r6nKiqqQxTYHeUVZjje7tZbCe4qiGKXURRulTyZxnqYqWcxqirTfXHp0idirlHgABT/Q9j0+Rs4vr3j//Q94fnGFS0JsAU00egyuSARJrTQxBTghiHhSGAZUDFw+P+OgtsybGSiDKUvKUsZQzv7LsqKqqpTlii+RNhaUHgOMMWSPkwUXElogk0JIrxtLK+hJN5AagyGJK5LIWBABLlkIoapE+CoSOD4+pmlmxOhHomVZlhwcLHhw/x7PnnzGs2fPaNs2Eb9lDiGalN2HUUJchezAPA4KRiVZUkeS9zgC/eCwRlM6hS/MmMNmsbJcXjRKj+csxm1wK8GsZ7Vcsg49J0cLiqpAx0BVFsxms0TwXeI3PVGLsZnWmiJJfVtbJB6FFyHRrPYZPGVZ8fjxI4po+OC996QkSrZcSqrKSW5dp5KaT6gR7PIY9jPyjPLmIB3iqMNirE5S+ZqY9FimFgFpckWlFtlSgYuBOLS0S0HUnPc0g+f03vbenR7HbXP4i7ZfFoLwwnLeFxzL7uOb5/GXFQrcFmj8Rdi+ErIRnEtwrwxATe7oSIt/9ITg6PqWEGVCNymzR2l8DLhUl5RAQQKYfBMabTBRjbDv0Pd0zmNsiR8MbjAJUk2ywp0IWK2W13weWuZRPCaCUSP8C/mmnsiDs62cRZXBGp8y8vQHLa12IUbMyJLYyiYzenVskQzxFBAdgKi23Q9EL/tK0GlU4LUhGM0QIVjL/PQO1eEhzhQJ/jAJmXDoKOSsfmhROuIYyPCuUO+yD0XulRdTplxKcSEtcNqwWq5Si6i0ow6Dw2hL08wxxnB9fU3fD6xW66ShIAGFeC3ocdGJ6ZoLaKuTF0vSR1ARQ0Sbgvn8gKqucV7ExK6Xa1arNUTDp589wcfApm25vLySDN8H3n//A05PT/js6VP6rme9WY8W80AyuyrGYzNmGyjaogAd8dFvJchTMNY76XRRaaIVeXhNcH4kGGTxN2stTV2LHkNVjWUTKXHv8kCstQJ91w5XVhRFQeEG+mFgns5TJGKcx3kJ9PJELdyKgbqumM8aisLw9ttf489+9nPu3bvP08/PuHvnLuv1iqqSRbQbBvquZd21PHnylM+ePmW12uBixHuIKhHrlEHZApQWQzuCdEuQL6B0RPkQmdU1bbuk3XR87Z2vU81mmJQ1F0U58h18CpayvLg4pCZH4hs8jNun5ggEteWZ6CQGp8Z7So8lFEGXgijgBo0O+T7OgS3YouDk9A7HRydcXV9RVhWzZkZVVVRVSVWVHCwWKGN59uwZBE8ZgqBC0RDilvyYwCwypylFO6n8tOU0Ba2JUdqvB+cYnAQBeX4XmXjp8FBa9qPzwoxojRSFnISLy0us8ZwsauZNSWkbbOrOCT7Qdz3eeUxRUTezkWPRbnratk2iaGkeCBFVaIrSSvARA/fuPsDqGZ89OWO1ukarSHR5HMsEGIOoJDvn0IUd53UVE6/EyHNTHl4e/67vKQqDtloE5YLH6oKqKAT1CVJWMdpIF1OMYtho9Ch77n3AKk1tNH10uPU1yxAIpuDg4IDCGhTFuNbk4GYsf+WxdcsCu4ta5xE2Tt83xuYXbfshy/h7fJl3Tz5nGnBwUz90+j2+SqD01QKOlFi/1Hte/pheOdjQWnHnzpEs2gnSU9oIg9haqU3HgB82LJdXzBcLlJbJvzQFrRtAR5paJi7vHVQVg1L0MRCtwZYFhfMYI9LVJkqHge9auhX0haFvO4a2xXUbutUV188/5+LJp4TKctWuqVygCApb61FvAwCT+vQngbXPQkWI5sJWCRARu9E5w7JsK8iRqAei9oIiK5GMUhGsLgGNsRVDiHQ+UFuFjQMqRqwp6F3AKw3FjFA2tGj0/IDZg/v0hQWtKDDSt68TqS1IEDdvGnxwhNZR6xLvHUoZXJJU3gYBHkgZuFUUuiSEyPJ6xXK1pqwaylK4GUUZWK3WXFxes1otOTt7LqqgPqCxrJfX+CHgBrGXt0WRRLdAhN5NamP0eAZIhNCoNW0/0A2eOdKO2XU9Z+dXrJYrlqsNfS+W9J9/fsnZ2VkKMgOr9Yar62VCLQJlUY/14hDEN0FrjS2LnbIWyH3igqPUgigIGVVhkzNn13UUVq7ner1mSIhMJvAWRjJ6rcH3A60LDOt25FoYq/EEyiqfvxKlFcsUoCwWCxaLRSJOG/oQKYuKorC4wdN2LiGDWoLmEHn9tTd45913mM0btFY8Wm94/8PPmc3nfP75JcaUdP2G2byk94HWBZ6eXfDBRx+zXC3FJTZqXBChJ2MVWom9uoopIFTJ0C9sDdLyfZARwhAtZ1cbju494vT0johmTRAYYwwFu6qYks2SBK22m0r7VrdMXCFufYvGLUr765bbMPm7UgSlwEUi2e8kCu/EWGxZUc8sd+49xqMoSkNZN9RNQ11lrY5D3i0aqvnHnD17tkXmnIiTuUG4PzF4ucfj1tnVpBIKPmzRSWOIUfySut5RGENTTxYIrVMVRlqhQZSDo1ZUZSHlx+AYhgjRcnpyj1npsVoTfcT5iI6efi2t3lVZg9L0XUfVNFR1lcZ3x2a9YRi6UXPEDxGPBlNh6gPKo3scH73B0dMrzv+rn1Lh0SZiVaBMZWgXnNgTDB3ROyor/COlwEQE/QpJ6NAFTEJUTAQTNaUqUEFT2ZLWdeDBq0iPIFFKW3Fz7joCirJKujwJrR6GFpSm0hYTPW7T4fs1sWnYnDd0y2uqZs7hySmmrJA2XhApAzlXYlip2F8IFVHK0jorwiYyvBEp+ajVxIxz++58NafLiIqQKavTIGBMXmN+z+2IRZ6jd4f+9FPZK6+w8/yvdEto0ws3NWKNvMqhvHK
wYYuC737326A0gwtcLdc4HxmClzp00laAwKZdSftVXeODRxkRchE56C1zOSjxBwlotJH6fqkDA3KjhhDxvUhTtyri5iVDP9C3HdEH+rZldb1keXXJqluj10uOtZZujp3mRzk7t+VZuWQgsr1IV8jklSEtZkSNTtwGrPBFdM7KkH58oyAqKQspwOhANIbeCQx4uemwVQNFzcqD0TUHd++zePiY8ugOwZaEoKispixKpOPEEaLDxYD24tQq2hkiqhRDJAZGhUaFwhqBVV1weBeISf7DGMPp6R1iTCJaPnB5ecXyekWMsF6LhPAwCLRvVbVV5/QBHzw6GInEo3S1hKT2SWp5G5xkey5lwBFYrlYsFguePTvj4uKSzz9/xnK1EUVIY3h+fslnTz4fFzTnQjLRUoCbOEx6rE1jZ7JoTktaMUZ01GOpq6kbYvB0bUdZlCxmc4L3LJdL6qpisVhQpqxNJQGikIy/UKCSWmkcHA5oNx5bFSPPYjO6a0p3xtX1ktPTU45PTyBEzj/8mEePHqHaxMtQRrpFuoHZbMZv/dZv8fbb77BYLKjqmrIqOTj0fOe7P+BHP/rnnJ7e5erqijt37uBD4Oz8gg8//JDLy0uuV5vEzcmlEinVqYw4JgnrPPEFwjYKYHfCiFGChq53PD+/5M69B2grpUSjCwqdy0ma7Fch7xMzMcJN8aUcCO5vRr2oxfDmJh+jwXsMoNL+MqIkQWCBtSXHJ3fpXU+ILnnv1BRZvKsoOalmaFuhTc1ms6YsC1zbEYLHOeGcuaEXa3nv8Il4LcinCHyJLHta0BREND44umGg7Xo5F6SSa1CMtktkFFXQNq2ytohOUvuOYlZQ2gJrxEDPdXIPlU0DClo3jG2zJORIqYi1CmsqlDa0nRdJd1ujigX1wTHF4oiqOOTO4zf5+OOPcKsrlA9o5SkyQsC2e0wrKUk7BimFpnFU2EKkzGFUFe37TgKrIlIkPh1+UjYsCoaE3jnnRIE4OUG7hP6EdH6KVHZV/YAOHq8V64szGeOmYHF8F1OUNHOFrSqISbU3eHxwCPJ6y9IW8xVLgz/u/GmLdOcxvff26Qje7iftd0Qn5JKMQfZkf3sj+kuDhi9CJV424PhFCKgvfEfcfqVX2eurIxtK0TQNIYKxkagtbdsTuxbntxOAD55u42hmc2bJIwMk84lGoslRulkh6EESmLJFQdRR3FujwlqP8VJSMEbKIqNyZsp6264VXscwoPqe8ugYZTTSv563xDFRN/IpObYYcdFjcrklZbmSEUsgoUkThNIoY4naYMYMTSVkIaS6pMNqySg9iqWuGDqomhNmB0ccnNzh9PCYNiqOH73GlfOsB8Xh4ZF4e6Sbbhg6hr5HKekHzyJQenwc0qIqAlPbiFiQDYsi2m3dVekC5yJd1+J94OrqWoIKa8d6edu2aK25f/8Bzz+/IIQ4yoSLw6dHa0uIJI6G33ZvpFKJ934UAdpsNlhrWa/XfP755zx79oxnz84kc9OWtm3ZbDbEGKmqauR0lOUEmdC5xr7l82wdRLfqrZmkG5wTdVYlokIKxeHhIcF5+r7HDQMnJyecnp5SlyU6SleSVippD/ixeyd38uTSXzcM2Lrk/OpyrMmvk/jSO+++w2/85m/yV//qX+X0zinvf/ghP/5nP+JP//RPGYaBBw8esFg0NM2Mq8trTk5OODk5Gb/H0eEhx6cnVFWFMZo/+IPf5969e3z22afcuXuHtm2p65pPPvlEjispo+aSBYA22WqdSXDw4m2/thxi4NNPP+Wb3/6WlOJCGMXIMoFTJRhf7mtJFqy+GVTsq+yOd+OkRfrltlQm03rsTAFGgmjmSd25c4dIoG1XY4BRlgXWFiP3pqpmxKh5+vSpJEe1tGWE4Om7jq5rGYYelX6Xbi4vSGz0qXyzVf0FKR/2vWdjOpqUkGQpdjUe/9aDWrqPkIRFJ4NH54jRcnF5iR+kU+P04IgYo3jfEPFpfur7Xko6RMqiSIuAFosB4/GqQFdzdH3AbHFMUTXoouLu/fscHB5x1a4ISuOCY9O7VBWPo5qZNnrU0sjXcTpGpry07BOUOVFVVY33TD7nfjIvTLu9nHPimKs1xpbCLIph2+3iPW6zpBsGghJBw+H4iKqqJLnJzspanPGmjsS/VltkPI/jU39B+Ba/jO2Vgw1jDA8ePGC5XLPpetp+OQ4yHRnbQYdhwKU6X900ExVAIeNprRmcY7lcop2h63uUVkI6M4YiKIogssDKWLQuCGjKKg/QwLptWbUb2r7Dx0BZV4Qw0K9XuChoS2m2EzCQMjzYj1sVQBBjqMKK9LLSEaOkRl/o5NiYDNqskVpnGLO7XJuORKOIRhGUF9l0YBUCbnGfh29/g8dvvEU9PyCicVXN+dkZH55f8uCNN+hjZDN4mqpGK+lHl3KGGzMGN4iHSdafAGkps9akgCGMN3W+sfMN3/c9q9WGTduTpcSLokRh2Gw2XF4+5/LyCtBUVYPWRiy4SeZnaYL0PpI0FJOSZyJoikQRznnatk31a1mgiqLggw8+Yrlc8sEHH9B1HYdHJyilub6+3pm8ZrPZiFJkQmQeZ5kB732WWzc7v+fHVVWNKocameyM0rTdGjcMvPn6Gzx48ECCVh+orE2KrOIFohErcr2/ICqBW5ftegyInp+fc3p6yu/+7u/yN//m3+Q3fus3OT09RRvDerNm/W+t+fGPf8zf+3t/j7Ztefz4NQpbStcMSNvvpuX58+dcXF7wr/zO73B0dMRv/dZv8YMffJ8/+qOf0jQNV5eXoCKPHj2iKAratqUoi52JPHcJ5FrhV8qCIjz9/Cld29PM55CuO0qncpMeS4qgxAMmcmsWlx1LbzyffWn2tim5NG8R4QAoldtPt6WgTD2NShGVopnPOQXW6xnStSJjJmfY3juK0nL/wWNQhnazkY64NC8MvXAghoRu9F2H63tBPVyPGzpcaud3Pru9KtCW4Ae63lHFKDC70uPxZT2U7fVIukM+YIMkNt7LfXM0n2Fnc/wwsFwupXMroQfzowPKuqbrOqyxlEWRvp/D+UjEEG1E2QrbHFDMjrHNAdFWoDVHp8fcf/SIYX3N9WaFS8isTvwvbTQ2FqKMq3QyxRvGoCOPFdEEEen0uq5FVC+hjRnFyGOq73v6vke6uWyaLcNonBh8ROmItZHB9RAVygi3xfU9xipxqFaG7rpifb7AGi0SBrEUZ2yt5H3j+Pp123KnZPrtLxjB8xfdvgJBVC7hxeUFy9WG1aYTuVzvkpeIImhNoUCnm9smR8+MYvgg8tJd27Fcr9BBjIVQOpVQdOqulaDCDAVaebZdFDmS1qzXG65Xa4y1HB4dY+YNV8NAiAFbVmjV7cQVkhkpVBJF2q/qKa1lkKdWV4XU5ka/hORjolWS9k3YXFTCrI8ovCoYQqR1A7q0lE2Dqhb84L/+r3H4+C3c4BkibK5WfPD+n9EsDpgfn9L5gC3FZCoGQVmCF3v0oqooS4s1Guf6sRySAx05zyaxyrcGViLExeiq2bYbVusWa2u0NiyXK2KILJfLCSE0cHJyijGGi4
v3shFmlCIiExIk7Q3FiiMeuIoDsIFAWVKx/6nip9ZiESdrbmfS+EQvA2CW7N11keV2ntpqAbJNh49/CgopVt22G32xFhXGvS1CG3F0BIKMx2u8Xx8Ql0UcIHl3g3/FylpIra4/Nx2XPPQp9CQM3CtoxykLZHQXpN0fiGAIQuCLUIAi5IHJ3ewc2nnsP2wTcI7QUWiyVKJaCrBcTFFpeXlyRTEJ+IUGPlXBqDj7ep5k1r4Be/AM4vrqIP80bI0PQ9pa7neACEfpyfE9/jn/w1hU/u358d9wdcrzOesrm8Zxo4zRmZI3fj/ivF4cysQ+2JOBtpAkfvkT1cICum5RyE4FhniBOgSIs5hTtI5ZKzNthQ4DTS77yOMELUOVTN8GHTNGQEKQ3ubhGdMWMG7Pc7mqAxHmmsQfAukabYiOAFjI0LPl+IXnXTNOj7HqvVCpvNBk3ToGma1B/sKXA1VyaDsmAXLxJ5kTCqM7HFnaeewjfffBOLrjkMvcHFxUUMlSASTUlNE1LBWofdbofLyx0uL7douw5XxwEtBiGEVJqaF+DNZoOTk5O06LASalmWqX/5eqm+zCJxUCapoN7DC4Gi0CiLEg/ufYuqKHG03hD7PMbFvJvGQLm/OVzDnjvV23BYrpYI8Oh7A2upAFlZ1jg62uCVV17Bc889h7Ioo7GJRGbklofCcouc3xusSc8sf9b5Z/MFkDedufGRh1ryje26MTw3JPia+Ps5WsKGF/cNvU9VlJVSuHnzFCEEDGaAiynkzmm4fqCwZqFoA4mLPRUH5v6Q8EECqXy9xHK5SQTsX/zyL/HHL/8IwTLPjIrEkvQ0zR6/foRUAs5bdF2DX//6b/Ha629A6wpCM1JK1UVzhyLfPOdGRv5iUTv+3HUtRzPmzzlHV3ncz1OeDz1TRgXoje8Ko+TH4TVqbiDxfeS1gjTKsojokJ3U+2HjnDln0/4SkKpMfAi+/4N9yt+Na6b3gIshLmupBMKF3pJA42aDermEMQM5AVpDSgrrGtPj4uIcy1jmISAq1iILnWfoSlHIiGqM4ZIAKgGfzxO+dg5VDwMJLSqtAdNBwEJCQ0gF6xU6BxTVGk899xLO7v0RDz69QPCAEuTgnJycoCxLnJ+fo4mlKEIMmxC6IuHwA2KHIKLnf/JXZGj8/vffbTQAhGJsNqSe7T0ZEMvl4RBKXQGDGY/p/ZSXl7cfgmyQcquFDiHxMaI/fmXch+wzj9OeiLMhpYSXknQ14sD1cbCk2F8IUbhFAVEOVkpaRKy12G536LoeOjKagdGbY2Mjn/iTa8gWiLnHyIhE0zSksikVdKZQiUAW/GAGUkEMDkPfou87SsUqdMx4cBNPwTmXSIu8GZO0OPEuiqLAo0eP0n3k7Gjuk91ulzYpzp7ZbDY4Pz9PG03TNFRzwnt88803USwsZqwMDl3XUyqsoc/u9w1Blv0A5zzOz8+x3TU4O3uE7eU2eRCTvhMAICdVVlntj+89hLHyJT+TeUYFPyd+8YYAUInm4EnkxwwDlosllKb8fwQPFmCq6jLBv7zRsnfFaZ/M4XDeoR8Mhkh8VQUZhbdu3cZrr72WuCV8n4m7ginkna4xLmA8aXKEJR9fwFWjIzcgcmMj97TzsXvI2ODxmn9+3hf5tXCf58adlFSkS8bKpSwDv91u0bUdek9p0VJkUHvUKxEeEDJE1F7EjZ1CLcJJCKGAIMD/Pffs83jhxZfx6WefxLkR+yINroO3eG2z1hBfBwKPzh7ik08+xmuvv5miDGMIRMRN6jA6NErWHzY2+BnNn9V1SCk/7xwRY2Nj/rm8zQ0WpCv4vjDKaJnkC3viiMSPeM/FBkWqT9TZduIQ8ZzmNWpC8hYOWjh4f1XOPO8nmg/TywtArGMEDP0ApSR2uy2RNLsB8o4k2fo0jonL0XUGu90OXdeSLlAAAA8u8MfXysazEGzEZ6gikMrd53OUnwcT5MuyIKVPBARPqfJCSATh0VsPDWBxfBPHN+/iwRefwAw7ckqURiEIedZaY7vbYd80aLohji3QXsEE0ccY58sl8Je/Iv7Fhx+S4eAcETuv25vvfUv8i9u3gO2OUmC3W+JeLBZEGG0b4OISePkV4n98/TUZGE/fJX7IIfLpD0E2ugB0SqCUAkXg9YvXp1l4MIINfzJjA8gXWiLg0eSm5r2HzEhs+aLMkK1zjizIpsFxdRIHPBVJyzfzORmOj3fIGs//zXA8lTtXqLO+kJJYzNZETY64qJdlQZZ2nLRkZZMhxJA+exAjAmNwcnJCA3S7TSmxLCVeVVVaBJy1ECDuwVhEzuD8/DyFPS4vL9E0DXEktMYXX36JN954A/v9FhcXl0AQaNse+z0VvDo/v4wpnhLnl1t4H7Db7bHbt4RstO2BZzdFp9j4WS6Xqfx73/e4ceMGFotFKrQmJaVbsjGXe9ZzCLvQGgIxo2IYsFwucXx0DG8s6XxhRBO00gkez6Fx/umcw3K5JMOjNZMxxX389NNP45VXXqG+9i5mRpH3LQ9sCnmYLzekEGOR+djjPpqPu+9CKvjv+SQ8tDkJMWaa5MaFEGKSBp4MojAtx56I0YoqLTtnoVSBxaLGzZs3sNvtcH6xTfwann8J6ReE/ngxLoBU3ZEVfxVEkCANDDJU3vrR2/jjl19GoS/SwPnh4DIABHI+lIgcnAF/+MPv8MILL6FWJfHhHanIElwroCFn68lVo+5xF75D4Y95WIXHAv+Nx34uT3/lrvg6Ju9l7xxENkL6fUQ2RmNHRpVONgJCvN6yLNG34/XmfI38+vn+hGSE9uraDMyM7NSPmVEiuQgXaeNY63BxcQkbxeNu3DiFUISOOUeVW4tSYegHnJ2foywraF1G3hBSyM4am0QD6Vx03jTnpJx47PwsOOzO88XaBYSQ0IJS7RF5O54AGbSDw7pc4vjWbaxPTrE3Wwg4wI/hm9VqBV0U0GUJsd0nBB5CQnj52Dv3W28BN28C+wb4p5GkeXEBvPfe9dkoDx9SGOTtt9PQx9/9PRkVN2+S8XF2RkjJV18BP3oLeP45+lzbAr/5Df3kFgIZMPv94fMdaq3waJTAUooUNiIDOPKnss/SPnI4FfxQeyJjg6dTCCNHI6+dwYuAiwObwylcJ8Nam4yNm7dvxUEvEIJKi2+elTI3JoQ4bHSwN4wQsFgssN1uIXqJdUaSNIZCEcYayAj/0sYSU1+jkcH8hblHT6qlyyhupZO2Bw9+Nk7GHHKOsXsoKZMyKTfmbJyfn+PychsREYuziwusliucn51jv29wcXEJISQuL7dxoxd4+PAhef5lid2eNDW2uz32eyKudt1wwIqOHr6YboB1XacsE77Xi4uLhASwgZELm/H95iXcpRBw1sEMBgIC+/0e6+USi0UN1xtISi9Ii3g/DGlE5TBpzhVp2nYsAW1IbbAoVHoWt2/fwo0bN+mYkqypsixIgTRMN6K595r4D3GRzj3BQ69EiM7Geb6o55yWPDRyaBPMQwPspeUbBhOG8zAK/zsPCWlJaZA6qnwK53ByfIT2zh0MxsI6h9JaGGfZ4qf55j2CIC5EcPM
smShcF9MWtVII3uHFF17Ecy+8gE8//TihDqSfQGPrhzYqKEaZQ1/98Ut88unHePOtv4hCYkBwgPSc0TaG8+bzPnnBM2ThkAd/6GduvOXHYcMzDx9et7jm/Ze9ie9HNqYfFxhTuxHJTaxBIuL8CgEoSwoZ5wRjXjd53jJ/jTZtHTexaeFAbhMibHbVAGJ2mSLEQo5ItvcO2+0O3lm07R7Hp6c4Oj5O46YsC3TtgPv3vyWn4/iE6L88bzyFsI1hg9jzEAWnPgs1KkDT2kXzpdBFcgAZ8VNSEmHWB8AZeBeVS6PBGmSBen2MenOC3YOvIZRD8MSTSkiQlFivVlCqxL7Zox8G+ABoo4DvyCbJ2+9+C3z4ASZpp9ZOjYF58x746CPgj3+kMMluPwp0PXwI/NuH42cvLoD/7/8ArNc0Zvb7q2GaEIB333286+XWBIcyeBzDJ6eUDF064OisjAbw46amPZGxQWqVhjZn9rLiAEgZJCLWRgkOSlClDm8tCqUwWANrB1xeniOE5yPaEaKnV8DaWEkw0MQgslS8SRFrPmSbJXeIEIKEohRZvKXSsH0Pm+UbiQAMXY++a7FYFAT1Dx28t3DOotIFgvfoY3n4MQYbFSadg4xeRfDEPTk6PkJZlCSt6xwEBHzc6Ajio8HcWhOZ2qzsGdAbg+ADHjw8Q1mWuLg4h7EWw2BwdLzAYAMutjtc7vaQQuFyu0M/DHDWoek6OGuxb1tYK7DbNWi6Htt9i+2ugckFzQTrLiAZGrw45iGUvu+Tx+C9Tzn4XBuFwz/8fTYwkxdWaEAJqKBgBwNjBwRfwVtLz4/ghhRGsXZEK4DIcZES1vMGTRkrzhNZzQUBY8moqNQSy+UKN2/eRqFKaEX3IZWGYPhfTo0FHi9zpACZUTDfZPj6DnE5GKnLOTrz7IQcfufzzze8/Ly5ccvn4s/kmx7/jUiVjL8TCVQXGqv1EpujNbb7HYQS0IIkmoOLz577SAgE5l0ImnNSFBA+oKjKEUkRFBP/8Y9/is8//xyICqZdt0dZaCilYazDIaPjELJDnm2AVAGAh/UDPvzwHbz6yisoK0CpkgpyOaCqSoLGBWUJ8GYYPCtMxp+R40DO8dU+nhuN6QU+IFePoX9LEfVi4mZFtIUwWX/yY+doC508O9i0R7JxQHdDX+MxGO+JQysiZL1K66FSJZQq0PcOZcE8BwElRToxjylCZyWULuC9AmWlkLHo/Sj8Fu8m9slogFF6KxkaUkp4F40CqeGcx+V2j27o0HQDpNI4PSlT+ntVFtjvG5w9eoi6qkguQAgY58khkBrOE7k8BCoGp4QARCzCVhZUhVTS+0pqFFJDKwl4CQUJ5y28tagXNZwu4CBhfYC3BiKK+1lINAEI1RFQHcHJiowbP1A4wIcoBEdPS0nELBcBax2keHxjYzD0epLWdfT6vuY9hVj+IZu3Adp6SOuBgkjWAoG0FgAgCCofYQ1gB5J6F/Oxfbg9EUHUx3iYFCIJrdD7kewmyTNCIINDIkAFkrJWWqN3BPl+++A+3vQGEBVV1tOITOkhWvbxnBGGZFGYANJzSCBkmuBRlS5uEIu6RtPsYkl4mkpaShRaY+h7VKWA9wbGDAS7BY/gSfbbRkGluQeFENB3HYZYYr2qamil0bVEyOLNhr1SF71x7z26oYt8Dp8Qkb7rYYzD2dkllssFvr3/CIvFEsY67PYdisLh0dklmj2Vlm+aNpFMGUUZBoOAEk03oO8tus6g7XOZ8pEoGTBqonCoaLVaJV2Qtm0TwpHXd+HS9izbzoRZFuNhT34wBv3QQSuFbt8AwZPaoBDQUbaYfT2lZDTKMrQhEOu9qMbibUoV8MbADw4mkoq1UDHlbYHTkxsoixJaRfGrQCRgISVEDFml8RumCqH8XJnoNN+EJqmAmMby52muuYc9h9r5/UPcAP58TiwFroZq+HrmhFGWaqbjhmR4LFZLHB0f48HDh2g7dqkEFDRxVDKBLyqOJTKVzgAoRYu8Jq9WQEFIjZdfeRW3bj+Fb+99BWuId+G8RS6g9/0trhFcRAwe1rX46qvPcP/+H/H8869ABA8pqO6N8AJeBAjhqZZK9jxpwxiJlDFPNvVZ3n9zlGq8GhFttTgG4r+lGA1jNszmxzvcRrRn+pE8ZMhS5KPBMRJK4zEEpwJn6AYEECSk0CiLBdpmC+JgFfFYMobOxlTnRHAXAtbGUIhSEUHQEIK4dXmdFuZPUH8gHsdDynhFfGOCUj3Nvk0IhXceJyenWC5XKAqFqiqw217ggZa4e/cZqHIF4w3awaAbDNp+QAgyhvHIeSW1UgVdkMyUVISsKFVASU0p9NFghvMIzkGxMSQFZLAInhxiHwALBSk1ysUR9OIIUDWs6ZCedux7RnHZqNU61l4Sj09+/nNtfnBAMyD0Bq6ISR6YOkjBGgQXX97icbXOfjiykWBzEl9BRspyAGU4cIwyGgQenCajEDwJulhj8dVXXxEMJkpIJUboTEz1BiZ8DYxwZd5Edn0Akmw3VRcdNxddUKpk8J6ExUSuHhow+GGyeTKsnsfwObRSFCWWS0pfBWjxX8Sk6L7vJxsJhwWGwSQeRN9RDQuqi9KibTt0XY+6XuDycgsIiQcPHuLhg0fpGHOCYN8PFC4BoSVNSzUMrLGYN/JgqbdIV2QcRAz95xAsXzMrcubemxDEl5hvoilLxVoMQ49CFyiLInJWqKU4clmSQZrB1+SpB3jnx/t1hKawPDurnC4WC5ycHOPk+ARaF1kYIoZgsmsbh8cUYucJJCEh5NVU3DyOzf3B959D13zcPGvpSt+HaUjlkFGSIyk5AnPoenhMcAopvT+iHEVRYLkko3G728F7IhA6CLD+Cvd3YI9ejERXGUl5WitoqeEdefrFao0f/8Vf4P/96AFMv0e1quDtAOssfsiSEjL4XykZU7v3+MM7f8Dt20+jLokvksZMtr/lzzMn5Y7PNw8CXA2VzA3QQ8bJ/Dz5M5gzVa7nilxPoJuHg/iqx4u++hd6RmMdj6qq0DbbuE5VqWo2ESppTeX+YeNUCIMQq0fPkbvcuA6pGODUME6XF9/n0G/wBsPQ46uvvkLbku7P00+XMeSqMAw97t27h3qxwI1bixhOtXGtdJFUOl4PkWApg0VpLhtBhhGhTbFoYbZWkdM7XmPwATZYWALyELRGUVaolyvK4PMeBcjJYV0NHzxcLI3g43PyCJD+h4cJ/9wai7n54NGbIVVczp0p6x1ciOt04GrT399+uLGRPAOK9QrIyYT3zlGlvODIM4CAUHSxWis4hLQx3Lt3j2B7VcXNZswXz7UyJh5EmFzKuIAwU3ZmnBRFiZB5XBwCASIbXhLK0g8DqDS2Spto+k68ppzIlyMYu90u1arw3mO/3ydjhSviEmt6iFkyBl00LIyx2G73aNshbQD7/R4XlxeAIKGw7XabzpXzWJKR5yys9WnidjEENH1szEindF8paMFkDREOneTGBnv6XPgNQEqH5XvlbBUOrUgoCCVSxc/Nek39GavAWuegtILk2PeBkAJACpDOOVg3ZsewEciLYlEUOD
o6xmazoe+kzwF5XRQ+9nWxfPplfNb5ZycZDtnYmhvB/N53kaUmYZtsbPExckMn3zz5NT9Pfo35OUK6X59CYDynZJy78ONx2NiQkqssjym3RVHEjYsyWqyxMIPFq6+9jt/99td48KCLKCEVG/wh0shs7DKpvChLOAt88vHHePtHP8Fzz76EEBys9VHqf7rwzQ2M6/p+jmYcMijmqMc87JY/Ixn5JPO/H77J7/57/jcKbX1nj9H6KzlkrSbZJyF4CEE8KlI9nhoaXBQzN5LzcT1H83JjYx4yyvucjF5Ks/DeoWk6CHGO5XKJ9XodM6RorHddg4vzc9SrY3gnMQw92v2OMszqJYUzAEDGuklSJiNZF0UMlcpoaAQ4R0U3x/0ixE2Q7s86C+MDvFCQkfAvpERZLaCLCkYoDMYhGHI6yRkjPN4FMlysd/De4QcIiP7ZNuccurZB1y0RJGgfz5wvIQSMM7DekhHnLVWSfoz2RMhGGmz5ggtMBnI2NGceB8uDBzx48ACX2y0W9Tp+NySLlsuXs8DWONjHRSMCm7RQEosKPk8JjAaMKkZi2WBMkhYfBgshHdq2w765hACwWZBeBgtLcWZLvgGVZZlqlTDhk0NInCmy2WzgvU8puM45XO5IOMY5H1GNluKdlztst7t0jkcPH6E3BsY8RNM0MLFctPcBRSHhHBXFspbRDodhoAJlydgI01RNboJh1TiBy7JEVVUpXZcRmcVigc1mk8I9+/0+bu5HCfHx3ieBsly2fbFawA4GUkgcbTaxQiDXQwnQiqqHct8xahACGSPee8g4RoLPF3oBb8cNnRVYF8vFxDhKmwAoJMOhvhwVyEMe8cPwwU8+kxNI+RrGaTCiFDzW8hDL4akz5WwcynLh8+Sbac7ZOHSMEEb+CHl1IW5EhD6tN5uYQt1FZc0xbXUyNoSAjChJAKCi0aaUJnl5IVN9juOjE7z2xhu4vDxHs7+Ai3LVP8T3Iy6AjEidh1LETdg3O7z//ru4desOljUX8Jpmhsz788qmna1BeV/nRltOBsWs/+fPjZ8zhWkVlVK/pk2u5Voi6FUDkr4XDvYhH1JgRFeYpM33kaes598Z72naT0wgJUdQp/R+5mGxZPh8fOYG+MgxiuGruB73/YCzszPUdT1BfGmN3GK7vYCuN3BmQNc1MKaDtQbWWZSCamgJKWNBtEhIjwiJ9wBx+cfwI6O9iJyaEEQMV/dwAZDlAkoKGGvgiwKL1QrVcoXuTCNYTyXVY/gtBJBTpjQ8aDO1zkP6x4wX/Bm3zWaNQrfRce0mayLPI2MNXKyobJ197En/RJwNE5OFi0jEBBDToqK0rJTQhaaNP9ZhYOayDwGyIM2E/X6P3W6H2zezNC8pY6xRJiW8uUeSMgHiVA6BKhMyg3v6+VHoK8R43DCQ4JFzAwJIw3+320NJgRtHJ1itVpH41iVLn1OztNZYLBZYrVYxhNFF0S2SJd/vqZrtfk9pU9vtlgyAvoMPIVZwldEoICOkbTs0TYO41mC73aJeLnF2dh4fNG0iUhKxiyu+DsOAvjcYBovBUIqXMZYgTXoqk2fHsWEhARkzfxgp4M2as1HY6OCQBUOVjLLwd/P0Xk6hraoKu7MLQpGKktKLA8HmSikgYJJxlC9kTFY1ftROoGfFCxo9/8VigaOjIxwdHc3CPJEI6xxlWmSGRh4imDefGRc54RUYDUn+nTc9/ndufF3X8gnL/+YNA7ga3knzbbY55BsUj02Ko09j7UoRQU4pjfV6jaOjI/R9D2M8zBA1cNhRCNONPBljkjLJiqKAEjIatAFlWaHrHH701l/gN7/+e5T1As4O0EUBf3hvPdic84RySYmAkZyrlcJHH36At958G8WdCkqV6PtuUg03N8bylhuP1y2Cc4OD+mCKoM432PzZCEkckoPnTW88Xh98Jypy6ICR2sHGt1RjVWq+PtKkMWkdZeNBCCJX54ZXjm7w8x+N63BlHuRGeP5vJq0yugBY7Pd7nJ2dYbFYoCw1jBkAEAK73V6iGAL6dg8z9CADmSA3pRTKsiRip0RMt2VUiYiRbFzRNYxcJwrb08s5SqsNQkCzk2UsxLLCYrWBrpYwPkBFtI/W30CcPUdZXM45GGthrIX0P4ST9OfZTo6OoNUOIYxSDyHuQVKyQx/HjJ86Q9/XngjZ8N5DRIiJF1qahDEWGgBjLY8KKDmWJ3chQEuqt3F5eYlv732LN157Ky0cDNFT4bE+i0tzuh+xkp2LBgynK4XR6s89VqVUjCUjpdzyhKLMiiZlWJSxVDyndHKIgD1wRlkWi0XcjAM2mw2stWjbNqabdkkXYrfbEX8iEiytI4uf88uNMdjv99jv95FjIeImHLDfN9EQIeRguVwmoyfnswBk3Hlv4WK+OLUsTMBCOTPvZERoQjIwAExgdy7ExshH13VX0kM5LEXPy6LZ73BxcYGXXniBrrkfUBclQLxmWhgwGo25zLy1JBvvQNdUFAWEj96vdSgLIqgul0vUdY3lcpk4OblIF2ltjJ5vbjDMNULSgi+mGhz5ODqEQPA5uc03qtxrzTkCfGw2NnjhHKfYNEzG1zBHT9hgyYu3MZ/ApQ2ZCLqr1QpnZ2fw3sFg6s0GN1YI5fvXSiFILgwXMxaERJBjCOfk5Bivvfoafvebv4UIHAL4bs5D3ih7glEmmcau8wGXl+f4wx9+j9u3n4ohHwEWo5oTZ/PFbh4Wy59vbvDNn9FcjyUP0eScIoC4amkch6vptsD32xqHQhPz3wVGrQP6nYmk2b07j7KqIqI7hgvz601ZVUJA+ukYn5ONrbXZuBVQasrrAJijMVYk5iYFFfIbrEFRhljPaR/rR9VxLTawzqDZbzFc7LDb7eGsiWUvCIkrqwpSK5A4ukzoDfM5nCMVUw6fDIOJAovkHJZVhWEYUd7legkJknbXMWOqXqygywW8UNBKI1i6dp+ygiRCoHL1iOTZQ9GCe3/7f8P+wa/hQgCkRpAKxgNOSIiiRqg2aONetb50+L/ufowBHv/q/F9jb3q4uGknA1rEdP24oQkhUGgFLSVE8PEnBXpsAExaWwjp5r3NRaQmxLmhlcLxaok333wdL7/wIm7fvInNagUVx8FF+N8gYEFkX2sBbyGUhAyelL9VTPXwHkoEyBDobwJjpsr3tCdCNqy1EVZFYmingeg9hTSAiaHBXjWzxpUiqe/33nsXf/3Xf4NCk7Y9zz8OXfDimhaYaIGSVyeTgYH40OYkRCkFdOb19V0HYzoadAaURmoMtFY4OTlJVVjZ2GHpcSmpjD2Xrmf+hPeIHqNJ5+RwAot0OeeingQJ4TDc3XU9tts9+qj+SQOFQiTG2phq6yOD3MVJZuOiQP92ltJCrbUYjI2waLQrZgtvdIoghEyGYb4gbTabtKFxDFQplUS92JNmo6vvexRFkXgplB1S4dGjFqvVCqvlEj5WJLLGUg48Z1NQ9SiSiQ8j+dJFPY0gRSJbGkdEUZ6UbPBtNhs8++yzqRAeQJlSad0WVze5+QKfhyg4n5/HG4/rfJPnMF/+/Xm8O//uHLGYv3J+TH4t8
1h6foz8uXGf8fYmhGQTC1RvqACExHq9xnK5wjCQXosQ4cq1hBDA8nxCCoKwxVgFVChJx/cexmiYweKNN9/EB++/A2cljLWpDx+rpejFuImSUmgApMTHn3yIn/7s5zg9uQGpCpC/+t1IUHbozBuWVxC072qH0JK5cfp9IbMr13PAmLnufAcuiPoKQHBTJEJrCi/01iIPWWtNoTIhMpn+GBrJxxajA/NQHc3TsVBmblTnxtcknOcAUqFF4lQQ2rvDfr9CvaBUUyEVpKQlQIoAwEEqoK5KLJY1yqpMa6uQlHZbFAUhotHQGIyBiUbGMPSTZxE8YOI6knUiJDhk61BICVHWgCoSCdQ5D8+cD+/BqdmMmBwyIfv9V2gvPiZhPKHggoQTCl4pBFnCVmsYUaLre7gHBifD0+iCxcNvPkBregRnIWJGXlACRnoMziJ44rYtyhJVWaJQEioEWCGi8KTDICQaY2NZAhH3FpKm6IcBVBeLOC2llhBP30Ylb+NkfRebusHRQqGqiNuzvQBcoKhF3+4QgocuFWysVaUjOqaVQrAWwjsyOHh8PkZ7Ys4GS0h7huXin0nsBYCK1Qm1RlVWCC6gH2wk/BF8aozBJ598iv1uh+Pjkxi3FWmAs+c1ndTThTcRJiEQ4Cc8El4UxqJENOH6ro/cBdpUTSzaxagGIx0cCiEBryL93G63iSvRdR3atqUCPnHitW2bQkTMD/E+wMV0Xu99tMSJIEoiPSFa6rFSrOVNk5Aia7KwgmNDw8O5kNJHKWODkrsZRWYjL+s0Sg2exWOZv8FoBiMfKawRjY8pIS1gt9ulzX8YBjx8+BDby0vcvHEDhS7g7IDVYgnbD6TFwoiBD3DBQUmV4n9SSuiCvEbjx1RQE0miWhcpLZeNjTt37kBrFbVZRi9ZSglIkaoZ5Bv41SE95SDliMQVDzjbuPI4+dwIyFGOeejjkGede9KHDA02Sg4ZS0IAY8pi5MfEvylFBN/lcomTk2M0TQdnB3gbIvluLLLHYcd0XWw4BLIKOFMsxHk3hICnn34Gzz3/Aj756IPpOHusRvVYgCiXGJgQTv10efYQH330AX75y7+CEoB3AVJeDZ0cRAgyzz0Pwc5/pu8hXNlL8lBDjk6xamKeqfZ97bpxN/89xFAqUpA4mZFXjBw+9xyBYy0MNjaSYSsEEKup8j3xPeQGL6NeQigoNR3z+XjNuUxaawTBDhNteqOxscd+v4MuFMpSQ2tFJPVCwdU19lWDEATqukIRlZwDKAQqNfG7ikJRCNAQ8ssOHcsH8HV4T2EPax2FT3TUBxGjaN9gBSqlIHUJLzUoCZzIoey8OecSuuo8p5Zffc7Oe7hACqOWHSVNKL+zDkYMkIsC1gwY+mF0/JyBdBbCWizrCjeOj1CvFzAa6C1Vbi6LAuvlAsuqRqk1oYexzth+v8e9iy2aR4/Q7pspUiUkXHR+2TnVosDp8QY3To9xtF6iKjSUoiiByHQyvPex9paFdyQQKLynkvMqGlHxPYWrfKfvak8k6sXs8SsLKCLKIABEARbO86YN0oIZ78zyffjwAb7++hscH59E75b+zptf27aTiUBGQhavTXEjkp3NSWEU4rGcpwKlJDbrNfp2n4wFRikoF7xM3vzZ2VkqNU8xR/LmOW2VHgrp/jPKwRNyt9thv9+nzd9FiDOAFj5Odx16A600nCTGPYdjKJUVsaDRqOA5J2aN/ybTIs/gmXjE2YKTYDI4rNdrnJ6eJuOqaZq0mTNnhjNruDidlDLWZNljtVphsVgkzkciSjqHdQxvWGtRxAUuEW1B/A1nLYSm6pDJm5ISXim44CcLGyEqRUp5XSwWuHHjBtbrNayNfA7QpqsUEcxozR49uBwWzw3SQy3f7Hki55BxDs0f2jRyw2COTuSfY8gzj4F/X5sjJnl9FedoYfQB8JHrY2IYcrM5wnq9h7M7OIxhl7zaLW9OMrqeFI4fYfXgc2RRoet7/OhHb+PLzz6BDMQTeewWgCQIFEL6ne7Poagr/Pa3v8Yrr7yC46ObULKebLT8c4785D/z/h430QOL44HLzsdIbjTOkY78WR869vc90/x4bG3k3rRIRigbxXxMkcY9MIYHldLgAm5T4xkzv+NqRg+/T2unA8nWT42tND6yz0spIyrGThFlqDhHXIy2bXFyeoy6LkEhMwspJFbLBYajIyjZoygLCIxzR+sCRVlAF1EFNCk6jwKEVIyygYxGkbUOLsL6UikUukoI+LgPSQStEaSGDQIqCJBTRyETHwJszJyjInEB3h0upGa9g410AeKZCgRBwo4WgFcGCg7W9TBuoB03AEMYcPPuDdw5PcHp8QbrBfEKRaEBAWipUBYF6qIEKfkSouiMxWAM7j96hFBVaKzBfr9D2zSQUqGuKwTvMPQdGUKWDPn1coG7T91GoYB2v4U3A4a2oecuBHxNqeVNs0ez30OJAFVouID0glRkcAQP4TwUBH4AjvnkYRQm5QDjZEwS1oKsRAmKr3nXI1giOKqiAGKev/cel9st3nv/Pbzxxhtp4EYNFVRVkUq1Jyuc5C/TpGMIR6QgQXatceIaY1DGT9V1jaIsMfSjIeCcx2JZY7M5mlj5jGbwQsxhlbZtEQKFQdgYYkOD2d1sVTIPQUqVMi1ImIs092miI3ImBvT9QHFTKaNeCVJ6V+558HscdqGsEeKR5CQqJpiOi7IAJCn7MQcDQEIxeJFhsS5mkzMBlhGtk5OTtMiMIQyPoY8psDFVWEpJnkW2UbNRWhTFJJebS6M759J7SilI7+JmX6Rnslgs8PTTTxOvBnmtEJXu2XuXDBnuu3FcII231E/fsWHk4zxfnLlfc3LpocV4OianXnOOZMy/kxsv/O/8szm5lyFTNjbYiy20xmCIMLioF9ipDkrahH5MUJaIXkilKP0wqr3KEAmAwcE72rWUKmCGAac3buDOU3fx1ddfHey361qY/l96V0oJbymtentxjvfeew+/+PlfQpaR9xOuzvMriJGUk5o7+Vi98nzDKCB48Dozw/LqfJpeww9t03uJaEZ6b0TbiEvgJ44DjYNx7SHCeJ/S1Hn9A3gDR/o+v5cbb2Pome6T+GFjxsmhUEp+7y5zfsih8RAyYBh67PY7dF2D1aqO65MHpEJZlFitlghBQmt6ZiJ66EUZQ+uSkN2RZ0dFKHe7XSTgt9gcrRK3g50qpRSKSGp01sK6qKAsKZPROA9jHZTzkJ70aUKIOhsxNJ1CKqxfM39+3I+B0m1DUHDR6PNSQIDq/lCq6FjIbXXrCPXNFcRSQ6wlyqMahRRQnsoPFLpAoTW00ihUgbKoqEK6pdqzQUkYpXDedji/uEDbtmg70iuhEPcQ16WAxaLCnTu38dTtW1iUBczQQ3iPoGx04BVCRaE67z0x67xHsA4eFi4AgyPdIpkT2sN0PH1fe+KqrzKWmc5PwzCOdZbyrqUEyZ1Swa0gBHywVO9AKZRCwXQ9Pnj/Peybf0npURGVsc5CFxlrOtDEY5lcKcZFkaBdB+9EEgrKvcQxtMD/ORoAdoj1WwKWiyVOTk4wtB32TZOy
LFitk0M6+/0+GRFUQC0qh2oFa02s7Dqt6UIbeYUHD8/Rth2MsUCgyWkGkyYnWe0RFgcovU4wdyEAIBKd954guwj5GevQtC36wZCGv1QQMkwfWvZ7Lleekz3zTdk5h7IssVqtsN/vJylmzNtgAisCUGgN0w/ouw4nR0eoi4pie4F4MYVUcNlGmeC3CMFKwcWUaAIksr8Q4DLjSmnUixUWixUWyzVu3rwdkR8BXUS4POpLemcxxMVi3vINlk4hxutJp53+LYSQjKd802KYfl5DJxG+ZsYCz5PJBo+pt3idx5lffz7fnBtLYSulIAPxNoKQMW1wDEdKJSG1hDCS4NEMuQEoYiIlIUyUDaBIpTFEFAYURgiBDcYSTbPDSy+/ii+//AJh1oeMlhxsMVUSiKhGPmQFMJgeqtB4/4N38eO/+AnKckkXiDz1lVU4xz7kLA1k/T9/bpN+FeJK3DkFMMJoEIxj96oEffre/HllYZErtw9GMcZz8cdI4wKxf6ID5OnfQkrAjdlXnI3CJMnVaoWiVLGf/HiP2RDnec68qNxw4DBMCJg8vzxslEv5AwECEqougEHAegMVFEKQENJHLaEdLi93WK3WqKqSri3Ky2sliZegNQqloIuCQidaQilBoQPmqDkfw9Rb7JstdvtLWGuxOVqiKGIFauXQW0P0WhHHbQgwDoDUgCShMOstjHNQzkE6DxtD3c6DXtHuo58CPoi0wWaDBz4AzgUEIREi0ZmyLgkFMAPtM96P6dJCC0ADVjh0vkcXSgQnUYoCwgNw0VsXEgEWXkYulpRQhUa5XAAxW5F1krqO9w+qDKxigcbNZo07t26TBLwHnLHonUdQnNgxzj0tJKqigPRE/pQAFaqLYSk7UKqrAK6sYd/XnkjUy4eAru9RlyVEnAgMf9EGRpkoSUQmeARJpEsKfTgUWqJSCoN1+PTTj/HZl5/hjddfJyjKCChdwIWAoi5oQ3UUqnDBQjkiDWkV66YkcRc3gcuFEHAmoFZlunzrenTdDl23g5QBXd9BBOC5516kDJcQUMVwwaNHj2hSaY021jjJDY2u2+NyR4O97vZRP4TSrKy3cIHSL5VW2O52aLsBg3VUHCtqZJhoBQ/OYnAGUEBwAYtFib7r0TZtJM4qfvI0gK2N/SrR9g0eXexoHkgJx2I8AHwQcWDQ/bNvEkJIxdf4fowxqKoqpcoZYyiDAcBquURdVcTp6Dpsuz5DsxRVD5UKw77B0c3bKCHQ+0ASw5zREEIUfqL6Boh9A0kTWcVB7q2DlhLGdDELhbJSCl1D6RLVYoXN8Sk2JzdgPelt8A0aSylrjHPlXhzN1evLa3O4hn/PUYucuc8tj2XnqYdjSGMMjcx5Hvx9DoHkUHWOwuREvDkpjxd9LWVKD6YaQ/TAlaJidNZQTaK6qmJ4BHDBplAdVdUcPXaAuTUSQSiMSCQZVM4beG8hEMlnZY2jzTGeeuoZfPXNlxTq9A6qLGGMJUfjUHgljBuhCONO6AZSrbXWQiuF3eUFPvzwXfz8538dlcg1IYXWQauC0mdFTOmWbDtMkadDC2P+nheZ4ZAMhmiUxP4MApG+MhbOyzk7PG7iQehePGUE0N8zawIA64eMpk2Um08fGdFa3viReC5kZFHZBpn4DcZZbPc7HOsjABRSoerVFF6QagypBAgoCLB4HgAoKHB5G62IP+G9hLVD0uAouexAVCGWUgAiEFFRA2VVwUgBG/VTgICuHbDbtmiPByyrFZQisuFyUaOXDqZ1KCCxKEuoQkOXBZSO66h16BpCgtuW0GQK5W5hTI8QPIpCjwRUEVAoARdTd63z6IyFQwGogCAlgrDoTAsLg85ZKB9gXYAxLoVlbaxobj0ZOgLqyo7pnITzOimQAoEMekFzJlgLoTXCYGDaDqBoCey+xV54FEdrNNagv9xBK42yWEFiVLmdOCbRWpQRtdt3DezQAd6i1ArLRQHnyHAodQkIgeVqgbt3bmFZ17C9wcWjCyyiVIBiw04pLG94sm18gPaAZJXWmIkm4jxgfmaIzq84kCF1XfvhyIYQqErSTlBKQYSrxadIvXEkZzIxh2OmvDArKbBYVLi8vMSXX36JZ599Fuv1GkUxqlnqokAftStYc1/JqA5nMVmYueWwtBT6ygLDpez7gUq7v/ryKzg5OUXXdtSpIKVMPgahFWNlQSaAem/Q9xRG0YoqFlZVHbkTIZabb0b0QCpIGQDYFHPkFDKOt05IhiCCnxAkH+tjyIQFuyAlhrbDrmmB9NCvDwXkCyLn4HM4A8AVkaB8cW6aBn2sblsUBaqSrGlrSA21KArs93vcPL2BOqIebJxYa9M5pB7HCsmSE6HYuxA1Sijuytoe3kePVShoXaCqalT1Ak8//QyOj08gpEqoFzAu5ULKMcQW74GNg7lHmn7PPN48ZDJvefx6noaco0M5MpGT6XIDKKFus/PmSrH585t75t57qECidiEtBJjM/7RQzcfD7L08RMAqivBEsg2Jmc+cIQ/vqP6P1hqbo2M8/8KL+Pr+VwTlRiOWr/Gg9xM3z/SXMF4He91aazg74OOPP8Ibb/wEJyen6LsBZakoxk5BHkJQU/+Qxy+u4Rjwe1f6g+dH7L5kaGRz6hAilj+7q318XWNUI/85B1iidRNJvwFcoyXh1wQKCUFGl9LwnkKji8UCRUHrLKuLMuQ/vYbpvBg/H4Agsg0PCEEmLkben855uGDRDzZqYoAMToyGU9u2ePDgIep6gZPNMY42a2hNasZWCpQlGeNlUaCoSkBSCrQ1VEE6BIFhGLU7zs8f4fLyEtYOJOy3qKdzJkT+XojiXz5AaAlEPstgDZpmj2HoUtq2BwiZY4MfgvgKjpxZG9yVHfPuj/4LnL74n88euEg/g9LwQqHreqjO4b+rXoULHv+L7jVIRXpUKhuzJM8vIhAlIrLLY3P8XQiBuy84vN4PVF/LmoTok2MTK0IrhXpRY7VYoCoUCk38DwgRDfS4b8vF5L649ljCEQUlX/ssw0dE0i2HK7+v/WBjg+W+m90e69U6ZRfkGxnDjPNYaoJyMy9RQGDwA95//3385V/+JZUoj4aMFFTtcRezP4qiQPAOnhkaswU69/ySbgEXpqIPRka7xjAYnD26QFmUePHFF2O5+A7BWrjowbFXy1LczH5umiZySYifsV6voVWJpmmg4qTn7Imjo2MMQ49m36fBYK2NIZg+DY65SqWnXRZKRVTAGVhHpB0RyIMzxmAfyZqBB2d2DP59vkBqVWC5WiTjieuZsNGTe9Vaa0itYYYBZhiowFxRoNDEp9GKjMeiKNDs93j27tNUtdXaZFTw5psbNrknbx1VuS3LEsNAISsOqeTQLdVKIM7InTt3UvZM/vznZL3899wQ4Pfy2LONKWTzccVt/v485n3Vu8XkePPnkb9yrYx8DOTXOydHJmG0eGwf4Ww2NsgLGcMko0cyHR/0nNgIZOSHidg+FcdLselYrGwYXDTC6Tpv3ryJ05NTPLh/DyFuXkVRXG9sfEebPCcp8c03X+PjTz7CL37+q4Re6EKiKHQ0LEZUQ0ZeF2Z
IRt53h5CPvP/nxvahz/Ialx/visEhBA5ZHBQ5mRsaB9Cf8UDpM/kmzs+P50XXWVjbQmuN5XKBxWKs4SPlyMvIz5ejfXn4jisyMzLHz4TT3PNsNek9QhiLtllJoTcjqML3YCwuLi5IAPG550G+gMNgOjgHykIMDsPQk3qwFxgsaWj0fY+uGbDd7nB+fo5Hjx5hu72EcwZ1XeH4+DhJoveRtzJEjgdC5LtFQUAVOXxwPXbbLYXIY3ZFCBkrI/VHpkGS6Un0zT1g8TJOn/1n3/HMrrYP48+f4hc/6Hv/U7TgW0BaIKZNQ5C2S+AstGjspBYn3Z/M2BBCoKpr7La7ySKPbJACFPdjIa+kRBZCUuJkMamh72Eh8cF77+H+vXs4OTqC9x6lpqqxhSJrjMWHyMggj3cONeUwOBAXZ58TAwFnPZTSODo6xnZ7iaeeuoPNZoP9fg8pBPoo01oUxSQs0/d9QjRY2CoEj0IXkEJHeXWDugaGYcBqtURZVmibHsEDRQkMwxmMoYyTtm1HAmUGwaeNJkSYSlDFQud50hfwwcJZTzLruz2s89GTHxfT6zZcAHERWaCKsDqRU/ssRXjcoLz3aHY7LOo6CZg5OxqNUpBHzYI6dV0RkSiGYXIjJvcEeSO1hqor8mJWRhIgh6wYIeLspMWCrnu9Xl/ZOCaI1mxcTJCAmRDXPK7P13dog8wNFjY48v7KjznfROYGRn4Nh64n3/Ty9+bX49kDnxhG8XhyfI4J+QoCCLmTwPMo470ELqA4OVua30LEUEtRwFpCOOq6xnPPPYfzs4cwfU9hMTlVY32cllDNOO8J5QI+/vhDvPzSy7h58w5cTLlPRpWkNSc6pmD/P++n6/r8UF9fh3zM0aXcwTmIbGTIyKHGX/luQ2M6jzExNDSoWquLYY8hciS26ZlTuimFeL2bEh3nhi2AFJ7zftxo83HPBsZkbZESZRnX+6xQpBDs4BCiYIxB17cwZkBR0tjz8HEN2uNyewmpC0hdwBoPawP63uLycovz83NcXFxE8cQGi8UCx8fHODo6SvWsBGJ9j94AztPRraN7txZCSAx9j2A6NLstgnPwzhNHw4X0IgSZNlgf+DU+tD/8m/8L1m//H6acsIg+paEjBIIqUFQ1FssVjuUCP/6qhBfAr18JY0zbs+oqSTggGpNsWPK6ftUgnBJ1EchA0JGvRHwjH1EJUDgvG5hi9gyD+QzAt5BlFefSPNckpB+B708I+D+VsQEhosIkQWohqATB+YRciNRZ3HgwsxeXx8alFNhut/jd736Hl156KSl3+lghllNgrbWoypLiuxg7nRf98RLnG2wOmQOFrvDC8y/COYunnroNVvSEoIfDixwjMW3bppDHtBgbeYIk3BUSC7zvB5RlBcRSz9577Hc7sPIexxx5c+WNlgcOoxoOBOVx+XDIWJ7e2KjAukPT9pQWlbG/5sbF/D3vkQyLOQIBID0fzkphI9FyHQWlE9fAxUqsTdPgzp07ODo6Qrffp+OwAciGJyNczlFK1jAMEEpC6zLC7vQeX6tS5LVVdY26qpO+Bhe+42efbxTzxX/03sUkTDQnEuuymISx+LsJVs7G3FxFNTduxrE2/c78WfDf8uPnf5/fw7wx4qNCYNYvEEgzIE+JnBs32REgBBu6bHDoxMOimCxnMcw3Wkp/D4H0WFxdwzuDO3fu4Pj0FPe/+eYK0vS4jY1TNvipfxUePryPTz/9GLdv34ETFAoMwUFInT2HEZKGmKJKfOxDxt/c0Dhk8M0Nxcl8Ba55VocCWIgL9mFk47v6i0MEZPAxujKGv7QqgBDQDz3alsX4lgAEpPRI6bMikuwzKQE+BlVVVVeqIOfEcZ4/7EQEADIKPSIoBEQ0gQFmGkRwzqHZN9g3exwXS+iCFD19sDDGo+8b+HAKEYg/0TY9Li9GRINrTwEBdV2hrusoS1Ck9YbvA1JB+ECy2jEECB3gzAAMPbr9Hp65b95F0r1J6z5xNSzJl3sHnelsdM1DfP3v/u8x8ysz9uOLRPEkfFFhfXoTzzz3Am4vnsZf/dsSVgF/eHUJFApSBATvogrnWBSRsspcUkcmHoiKNgTxYxAElCiglQYbKOS8Rr5VCGCRQylIA4sNDJaAn6wNFSDq9fi82OAJvE7SwA3IDB4IhMec3k+ks8EXOwwDqqI8uLgTDDfKjPMDZKuZ/04wL23Kv/n1r/HP/uZvcPvObQx9j8VymSDCruuIryEovJIv+t/FCudUWfoHIKVGUdQ4Ob2F04tzSKnQ9wOKgrT7eeNgY4Blx3nRy/tgNKgEVqsllssl9vsGWhdomzbKx5J42Tff3IPUZESxsTEXLmM9ixCiUBcYBYh0HCkxxBDMbr/Hbt/Ae5p0gxkzEvKWb6xs4HE4Il8seCPm3HUpqf7Icrkc08DipmgchVS4zknwHufn56jLir6beUhsOOQGXFrQAWitYOKmX5YlmqZNDHkqJ036HFydVimVpMrnyMbc0GD0JTcy8s1hsoFkZE/+Xt6Hh45/XVbJIS/kOu+ezzfPkDlkoORhm/yeeSEP6f0RxYGQ2bGSS5K8lhDyxScWWuPYMHODZpszIyRCsGIry6QD6/Uad+7cwcP799P8fBzRq3mf5FwYYwwgiFj94Yfv4Sc//Smqchk96VFsbBz6sY7DgRK016EWh64h//2QsZEbsd+HTBxq133nCtoRQraXifh3kQxE/g6JcI1OBIVr+4hEiGicxvHlA7wYj8kZPjwGuL7OIUI1z08AaU7SGm/TXDqEdrPTstvtcHlZoiwF1isNCA8IItIOQwtrDTxklDrf4eHDMzx68BDb3WUq17BYxCKMiwWhnlUJIRDrQgloXQLeIgxkYBFx2kd+hkfX7tG1Lby1hAxHg8I6n8S52Hm2gcpsuMyAd47+7SP6wH8SMr68QJDRMPQOwdsJ1lZWFUSlaJwGn9ANGdOCnY+IS7ZepnIM4LAmEYB1RJoAJBrCuI7odAzPNpGUCQn3mI1BAdLHYqfMB1hnEyqipEyl5fmAj+tMPLGxUdUVTD+KSCFkIRWEyFkYy6zn6ZI5G58GvUPfdrj/7X18+smnuH3rNqqyAjyhHkpIaMnEPn+l2FO+mM+9TZ6o9DcJpUg3A97i6OgEl5dnKAtNFWAFGRf7/T49YK53AiAhLgCiwNcA7wWRFqsK3hMBduht1L/waJoODx48oAFviIfAxdlyQazcoyqKAiZYSKFghafkNSFhPeV/t12Pi4stjDWQmsV0rsLz+YabSJmesoSGYcB6vU6GHyMd/Mx4YTDGoIhaI8zPkdm5FlWdqtxWFWUf1AUhI7khk3vvbABZa1FVNfr9DjbYKCBWpAWN6jKoREqlvHudFhkm1ebeZz5GuQ/45yh6pCZZIFJKKvykZNo485BMvtHn95B7fLmxkAu88bk5wydHMviaxpj6dGGfGyj5MfMxA5C/4X0Ys0lBxD2IUfOFtVpoM1YJ0aNFjLzZPOWSZc254BLfN2UoCAiIyE+izacoCkA63L17F5999lmSub+ujYbL1cbjhz8XgkcQAV/f+xpffPE5fv
TWT2CdQaGrzMjgcR/1FMT0OfHz/i7DYL5w5qjGHLnKScKPg0ocblODMnmPmaHB18Hn9PBRGpzrpRCZlxEg7x2KooKxQxTraxECUJYFOI08BIAJv0rl3KFiIl7H585D1jxuuZVlGYnfMesvsBowedy0oWpYazD0Pc4vzrFaF1ivK5RlBWc9gqcssn2zRdNuoRStk2dn5zg/P8d2t0u6SEIIbDYb3Lp1C0VcaxZ1DZIQoL3GWEcKyzHjj8LQJo514MG336JrdlCWVK1DzBCk7BWq8oo4jkIQhDBnY9X5+BnEwm/R6ACH97lPQ0giW0PRgdNRjBngQyTTCkqWEAiAG0MolGE5pls7n1caJsTex6yyxDubOTY57ysEARsCYBmVjbomTCsX0UiHQHCBRARCwGCozkpZlnDxXtkZVlI9domCJ5Yrr8oKQ9ePiEW2mYUwEtr4RtnoAJC8TO8pJBAgsFkfoes6/O53v8MvfvGLSapgDnUaY6DEVWubvae5FyolUrqXYIs3KJTrE9y+1WG/u0TTtJAywHsLHzekEIinwWqgp6enKIoCbdumjYrLs7OXbYyDFBrGUOn4vje4uLigYkPOoe07PHp0hu12myxy3uj5+rmsfQDgQiwj78jIGIxB35toAPVQuoBSEsZYKFXkT2myEV3xnLNqjpxlk2fGcKVXvp6u76EVKdoBU9XLoe9xfnYeq68uoK6JB+fEUw5HKaXg/FjQicvY55sqk1R1fK8sS5ycnNCCEq83J6/N7z8PeeSbA/87pY9qndIaeWzyK9fPyI+Tn4vvM1+M8+PnCF9ucORE2bmBMr+XHHGZGCNR7E5JiSBA1Sq9h5QKbGrzfbC6InnFvJCp+CJUgzYvEnGWkkh1FIsmJoRA1GcItLFx/xVFARt6rFYr3LlzB59/+imlZM5CU0/U4rh1dsBvfvN3eOONtwiuD1RNmq+fDaQrFkg6zFUjNH//ujZ3aHJU6z+GRteiIAT1hZQeWmmqoRF5codQO2A0yK6i0x5cKC8fq3ndKp7XUitUFaXB66JAITS8LmALC2N6ODsAiM7cbo+LixrHR2vU1TJu6AFCAm27w+XlGZTusN1d4vycjI2+bRFAY/j09BTPPvscTk9voOuadD1CCjJeBk/ZGdbBGhevUUCVJaQUcKbH5dkZ2v0Ole8AUKafjWiBC6QgiogGuPhzkscTRlQjIEcIKMtOhEBEfgEAJCpm+iF9f7/fwyoRKyuHlESqJBuCNK7JQeBxFzVksudF64ab7HtzEnBynGQWHozHU7G2DoVECJEhpFNAOtqHjDMI3sfEqKhlFZ14Bw8Z/kTGRgBZxCrG4FnwKl9MhRCTrIPcIub3JjBxIEMgeI8vPvscn3/6GV548UWoJRHVpCSCkWU1Tj2mal438ZNXSstkel/ExVQIhdVyjQCBtmlQFALeWdiIwgBI4Y6UCRPG0JAQAjdunELrImV1DH2Pvjfo+yHKjvdomjZmr3RougHn5+ej8E4MZ+ShDs58EZIWin6gXHVjLZq2xeXlDvumIdEYkNXNG8p8schRBf67lBKb9RFWq1XSEVmtVnDOpWfG5EyWJ99sNkle3DkHKUbp7r4jVdWnnnoKUo6iL/nz4U2bC7YlREJKOG+hywJaFTg+Po4qqn00xOg4UkoU0QDiiq/5fbJyZ26Y8v3yzwnalbWJAZJZ6PnncsOHf/LzysfyIcOGz88Iwny8psU6myP5c8w3aT5mbjiGEGJOPy2SoxEZCdohIhz5+AiZ0RThd7omjvXyXJFQEghCEWIqFBBrRDjnJgBsMtIceUC3b9/G13/8I+nI/ANsyAKAFAHWDvj6qy/x6acf4ZWX34iGBntk/HzUhAT5fe37DKH5upU/v+nm/B9oUD1RExCIIbDIuZHSI4SxhtEwEBeK1mUNHZ0UbuxJj8bm9Hnla8m88ZjilHyA9gIhAVa78V7BOxagIwHCs0fnODk+wtHRKaTi/iRF7Ka9hLFbPHq0pbIOQ0frRKzB9Mwzz+DGjdPkvLKolQ88zslr94xM+ACE6JTGGibbywu4foDxPSBdDKcQ6d85CmOEEGINJz+GDmJz8d8J0aCO4kcSCcqR72ANzNBjUF3qs+12D6vi2iIoCErXJyCg4iPIiKIhRMpBeijUX4LCIlLEMiHxWaRHGDBy/iLfgxPSpKBKulIqMibCOGuUklBxHrlYe6sf4t7IhgmIpiAPhCsPtScMoyDG81TapPLFnkMmjETwQnrImyuKAl4otFFL49HDh3jnD3/Aiy+8QBVDZUAhC5RaY4BAcB6imC7YhybCuGiPliHA0GGAErH2iJDouw5dR6WPg/UpU4artq5WK5RlmcIrq9UKm80GWuuEfJCMrkHbdHCxlDxt2g7GWHRdi2FwaWPkVM6qqtKGw3A3bcRkYJh4XOJptKOhoRSs9xA+kNxs1q/z/sj5CrnnvVqt0vPKN7o8Ddc5B9N1QNzUKJyiIEBGyX63n5AuedMTGYEyh1zZM+LwkSpqOAQ0TYP1eh2rOFKIhzdJFVEVqipbo67rMT1ayoRw5BvB3Og6FHvOx4n3HiKWMM/Jn/x5Jrjyv/MxnHu9c3Ql56mw98/HO0QwPMTL4N/z68nfk0LQ4pg9/zQ/8jmRUiZFMi5yLYWRyzF6P8jmFy98DL8HP8aU0+fj9W02G5zcuIH79+8/EW/jSkt1U2hT+/3vfoun7z6P5XIz6adpaObq5j9/7qlvvsNQyA0//j0Phf3/x8jgFvkzKYxCBgddF63RItYo4hIEbJjw73Rv2V45WVfHejI5UsifS4iyEHDWwQqbBNaC47pNdOyErkRxrt2uRd8ZFCUJNwoBrNdLBCg0bYu23aNpdxiMgQStl7dv347FFzW22y2qqoi8EQFnYrVqx8RnLiMQ4OGgPGXKtM0eu8sLeG+pvL30cLHYYz5nqV9CKsSW8xvGwpqBaVA0tuI/PAAEQAYiaXpro1o1f9/DCX5+FCYJIZC6tiR9JZlIznMkLsTnFcDVm5EMkauZa+MYnYZlpZQQVkDKjPcYBwLP/fGcU0QvHyeP255IrlxrDQGgriuYfkjIhk2w9gjZMRKQQ/n5zWqtYSj5Ig2gd955B3/913+N27dvg1OteMDngyD3OOaoRtp4M/IOfRaIcAmCFyiKCj4E7HZbWGtI6jte+263S5uylBKr1Qrr9ToVKjs/P0+psH1nEseBCg0FWDuVOw+9TQtvXnOFjTKC6ynu6IPFYH0Mx1A4p2lJl4PEwRTF0kHr8BzVYGg7J2kul0vcvHkTSksMQ5+Oq7XGarVK6qEAkjHU9z0Ga2Ol2Fh+3hsgBFRVhWa/x927d4HAZECJMlPVJM+KKuLyMflvSikMzkCCQmzb7TbWnRl1RoqyRFXViSy63mxwenqawj/cj/zc+f75+echjNwwyA2TNIayWDmPzdyInn9/vvlzv/Hfc84KX1+OwuQb8NzQy7O1+ByHNkdeDAPi/QApic5aClGmzKqY5sffVZJj/WMIRQjuP8pFCWI8T24QA9OtnPunqir4zmG1WuH555/H5eUlSdpf+QZwH
V/jUAvRg1SRK/XZ55/hq6++xOuv/2hyLOpH/ud/GKKSG5F5y8nu//AtXPN73g4jD1JIeMGhPJLxZ4eOyKJjOIXnxJyYPB4zwuxqzB6cj1EgW2sBuK6L3CBLqsfxSkkzqUxOAdXXkLCG6ksR90yhXFSo6hoQpNrcDyRQZq3Dol7i+PgYd+7chi6o9ofWClVdxfpISJmC1glCgw2hzF3XkTaTKmGGDo/u3cO39+4Rih08qZQ6C+d9RAamBv+4tmbGRva+DyGJCM77USkV5xkmyAj1m0CIBgDVIxEAp9iGAIepYOBoVIjJNSTnIK0zfvK5eMVQjJYgRhICFTZNzkOcOEIALlA2EY8DOp+HDVeRrsc1tp8g9RWR/KJR1iW6vgME5eQPdkBd1fEGxsJUAJL40pWqiyHAWYNSKxSFxH63w717X+O9d9/BzZs3UFYlqLiMglQScGM4IMFL19x4iFYav0WbGD0cLwS6voNHgNQ6yTrLgo7FBdeqqopqfEVM+aUaBGdnZ7jcbmnySgXnO1jvEQQN9KIsYS63aLuOkAc4hOAAhERq4snuolEhSKsczlIsTMRSy13foYsoCUNmIkioaA3D00AYVRRpUPGrqkssF0usN2vUiyqq7Z1gt9sldAOgyXp5SYzvk5MTrFZU3GgbK78iBOy2W3jnORgFLRU2yxUZSyYqhcb+dtaiH4YUJlCRe8GIjXEO+2YfkRlQRo0QsNYhgPq1qCoUVQ0oKp28Xq+xXC4nntacp5MQCBk9A9AUc5HxDREnqp9mGE3V8a6vTzI592fnKH79zdTQCSFl7rD3SHDjdAGYQ/EBgHakFc1ltpEdN7+W8SfiuDqE1tAxl85hYy02XYdbTYd+IP0SUrNlL0by9EYAsK8E3nm+JBa690BwCNZSn7FRFvj8tAhKpaF1Ba0cqlLi1s07ODn+I9pYW4dSVXkzpEVRQqZr4F6+9ndBGiDWWOx3O3z88Qd444034bxHWZRwzkMJmsvk8PgDXiGu/A5c5XodavPv5wbrdR5euv703fkznJ0rAHMPNN48ovuMwK/AYmZRsMvnGwEhHFqzsUn1LYxx0Hos7EgedZSkTtC9A6CSpzxewtVQZH7fzhk4BBg4OCtTijxlogHCETndC6pLsu/2OL+8QL2ssVhUKEOJQguq5yMV7t4+gYLH/fvnqJdrHB0do1wUMLaH8xZFWWC5WkKVGi549LaHg4P3DrYf0A8d9n2P1pExEIYO2htsz+/j4uE9eNsjwMKLmG0SAoLzKWyCwEXWaON12SP2oKrKiGBGEITu8HOirUeikBoakjgZOXHTGHglAEEOAjvEUlH4xefzHrPxG9gAmAYLA6Or0ckWvPaxUy6YDCpiGEXAcRGqINKooyPL5MhyqCd4MUJggtZ//v1x2g/nbATAWCJpSiUhlEDTt1jKBSAFpCYLj2E7tqxzGDp1YiAvvipKiODhY2G0i/Mz/OY3f49f/eqXUErGzJZV2jg4TRSYohpzj1VKSelJ6aQAJEGOPnj0kfzYtB3ajgr31NFj5tLxHC7hMNB+v8fFxQUpiIaAoqwoPicE1flwEk27g5JUaZPTo9ooBMZiKclT9jEu52W8N8QBGGLNEMo+McZF71yB1GAEIctBRK9WpNjnSEyUqOsqhX24Dsput0uhjMVikbzV5XKJo6MjcMn5P/7xj9hut5AFZYAsF0v6XkmhCzMMqMuS4oxKQyhASwnb9+Dqoc5Y6FggSQiRKt9CkELgYrlCCEBVAUKRVHnfG0hdIAgRBX40hFbQZYmjo6OYV6/TuMq9LTY2GMkQsQ+9GxdGKWVaQHgsMhqSEzVzFCNHPPK/6y8usfjXH/zQafQfffv2ROKd5yUtNmnpoQVYChmJprywimgcagiQwRGCwGKxwu3bT+Hhw4fohx5aU9oeVZoKQBhhfGC6Zs1/F0EAQQEx1dVZg88+/wQPH93D6eltWGcAEEfFuECkcNrlARw2Ig6FXuc/54hS/t3vDMXk1tLk73zs64TOQvSQ4++CYZqAABk3Nx85AZGroQSkFxAuG98+FjGDiOFIQrCscTCKMr1Yzpzuk+uyOIRYyM/5qZpuvq7mhjK/lBSQIhraPqZKKkJZEEN0SlPIxHqHpmuwa/eoliUWyxqrdY3VagkzWJi+Q6Ulnr5zitWiRuck6uUCQXoESZk19WKBxXoBVSh4BNjg4OFhTIeu2aFpeuzaHjYAVVlgGAYoabG/eATTbSH9gCAcXMw24WwNH6X5PVdQZqRSZsaCEPBs3GP8OeW9UOZesB7COTgzOjNKAlrSvkG6JDGFNiZYzJHTK6HhyAfhf+dtwkvLnBKfF4QPUZ4ducMew8jT0RgjmCLdFjm4tK+SbfV41sYTcDYoJbJQOkHNu90Oy8WCypE7B7afeFNgQ4Nqhoxpfil84InNq12B5XIJawy++OIL3Lt3D8+/8AKKsogbUpVkrBkZmFxZNtnTRMg9ScS0JIGEujDBEUCC+JmrMUSv3HufSslvt0RastairBbwkZvB5Zht9B46O6SHyMJYZGxF8pL3sGaE2+lckTsQB+8wkGHQ9wNCmKal0isORgSQngL1bV6G/caNG+kcLMjD4Y0cbVoul0k4jWHW4+NjHB8fY7ffw1pHYaVhgBSEQB2t1lgtlynGp7WGCJSlw6qgRVmkvHvvPdqOquTqQkcxmNFTsn7MbEIWCpLxWTO6xM86D0vMyZVs0EzIwXHS5vVI5kz8Oax86L35QvuPtmWOS77p5Omj41ymr5RlCQhaCK0lWfmHjx7gj19+QZkNiaMSMH06390CgOADpJa0kUmBBw8f4te/+TX+5b/4zzCYHlWpE4E5wS7ZM8t/Xjn+AUPjurAV/7xuTEyPy1f/uC0c9BQZoQkhjPH02efy65lygca/e09hTa1VLKh2ldMUQkyhRoAXV8Msh5DEHG2ehw35O7wPkDYLcTw4U2axJJ0iARHnOZWV11riRJXYtR5KV5CQ0EJjvVxjc3wcxxuHTfjlMEQyuvMWQhYx40LC9APu379P/CbvEYQnViqAxLuIzTo7uT8X8m0YcePFweeV+iyM2XgOlr+GG6enMIqrxI5In4vod57JyVGCfMyJiGwdWn9SmPPAfjgPgQhg8ry4TX7HiJrMkVnMkK7vak+Q+hoHGTIFvUyUynoPLYmzwNkWvNn52QVzZypNEqvsNZoI5//ud7/DU3efgoxpjkVBOg4hbjzf510IIaATGjAuWDyBQggwlmpylIVC8MQazuufMN/AOZfe7/s+cSG6WC/FWpegSa0KdG2ftDaI9ImYwkoehPOk8jcYA2tc1EcQEVIjhbj9fo/LCwpbUOXX+aiOogoC0IWGzxjbm80mFTNjFIOzZkIIiTcCjAXB1ut1UjhlEa39fo/1ZgMfdVO0pCStR0qjaxrI1WpczeLzXC8WFLKIkzcIJCOGkRStNQoBdF1PVne8BmtI8a6KRhHHi5VSKKsy8WfmLe8bHgOW4lFXNpp8QZ5O3pD6Y76Z5sdmAmkIAepaD/XPvMV57t1ImGODNScIjjwWSqElMa3xea/XR7h5
4ya++eZrODNAFZQ1QHH8x1ukqDH6NGb2mKHHhx+8j1/98i9x4+ZdeOchBWKmnIWU4YqxeV24JDdUD4Wr5r/njdet0aHIzYvww2yNa9s14bzokZPlTugFAfyMRBGSy2sg74y0R0YCadp8GKmaCuLl9y/EXDRq2mc50py/l9JTBWmBhIRq0b8FYjFKBSwXa1RVjbbt0bUDQqBCl0ozqb7GarnGoqphPRkXRMK36LsBQ29gDRHrrbWQSsCaHio4NLsdzs8ekXHEIZAQU1ujtlYIvKQxOZPv57tN4/lmPmZ4jXyn+OCgtULQigydQEZfxDYgxJSzxS8WapsrGvN7eWoyNx6bZOyVGeqtwUqjQox7Mb1CIsyOxs4oBYAZGiL/dHLloySqi14xGwNlGWO8SiPApTRHrmvR933qAF60CGkg0pCQJDblvMdut8dvfvMb/MWPf4yXX36ZBmxJ+vreTzUNeIAfhElFli4UWPHQAYKESqy1EFJAywL7XQM30EDd7XYIPqT0z67tEqoxxBQg6zyatsUwmPSQSKRmQNdRHZW2bWGMTS9rHYSQ8d8uCoYJFLokTy+WZt41Dc7PL9C2A4qCBkfu3XBEaswyIXVUNjSWUX019zo45MBICk8IfnWxqutms0kTpqoq7Ns2AYMklkMCXCdHR6jKClVVA96T8Jq1NIiVnIgyMTIBIVLGifHUF2lxExJSsVclUqiEjY2qqiZ8jfzFEyPfCBn9S2MB0z47BAUDmIynMVNjOvFHTYzH9c3/vBqHK01Ujs0XntHDHrMR4rKdNrWqrGEGAykFbt26gxunN/DtvW9oEeeYsxCPvRELAQgpYdzI6Nda48H9+/j973+P//R/9hS8d1AFICQiEfYqAnHIwDj0/ve9N/fwtNYHPcp/GEMjt+fz87NJEWP/IgtLCQ9OiSVytUzhqxAI/WH+2hjKErOf+fmniM+hPp0b65ypmBsv6W/WwkCRjpCltVEqASnJSPUOaNshGbFKCigE1CWlyC+XC0gl0A9UQt5aD2eBYQjojUNvLRHtHYleDb2HDg4PH95Du9/COQuJ6NgFDxdwZZyTgcuowPWoGLf5mKB/U5jcOQfjR50NKRUKXUAVJZSqxvVIUXoyRwV4HWKiPb+SpHpcy1mugJ8NPydev/KkBA4/547cHKGbz/d5fZw8y+5x2xMriAqIicU6DAOOjo5gQB2rZpkWfIF5VolzjkIvISBYCx09bSUljBnwzTff4KOPPsRrr70Wsw9o8DozTmr2JPIKjHyN3EkjVyQyceOYqCsyJIZ+gAFt/KXUZAS0RJK8fesp1NUSu22D/a5Fs+/S/RjnEwHSRWJfWdRRIZQHBU3mIRpebJRwkbGhN7QYwMbJL7BvGpxdnGO/a6N3X068iTwbgg2NECg75OjoKFVANMakNFGe7ABSKIUtZR603HjQ8rOt4uBUUmLbbWEHA60Unr57F26wKJWCC5EgJUlrQRY6PQ+txpx/F69dFwWcCbGfqD81K48Gn92bmkwYNlR5IpG362eLRByjapqmxY2NiTnjPE95zb06hnrz7IND6an/uFru6YzjjbyhCNUKkIEsIqE1eoRSatQ1KbwORuDmzVu4e/dpPHj4IG44SMbKD7qiwBo/sZZQdHLeffcd/OSnP8d6dYwQLFVHVtdj3IdQjDlK+n3Gx2ScHUDV2Mr4hxwdV5wpcFl0MB5OpPKoQ05gh4AUChIqxvmJ/AmEUSNJjsYMh3lzJ4DnynWIYL4u8XXy5/MwCjfKXqHxYo0lDaKmhZQiZsoN8I74fCEIBO8QHDBYi6rU0BJQQsIYj67pYKneJqxxGAaLrjXo2j6tK0oAdjDw1uDswX1Y08MMPSQ8hUpYk8OTeGK+iSYtjTDV2fiuNnWCSIodVmGA4ScZ1ysJBcQ1voDWCvWyJj5KXSdJCe6zHFlkp5EybvpEL+B1jJHIHKEK4ZqQ2ey+cud0fj/Xfedx2hMZG3yxbDislkt0XUd1MTDGCyexISGSeFQO7Ye4IHCnKkUalMFaNE2D3/72t/hn//yfY1GvYqEsBYexGBovFPn5ciNEHOB1EEmT0qYW9QLfWoOha6GkQFGUcTEUeOaZZ3Hr1m0Mw4C27SKUBDRNRwMzEAeBqmSS+MlqGaJiKMF7zhEUbXqSzJVak9fv4mBxDtZ7DJHrYozBxcUW+z2lgGqtUr/w9fO95cqjSmucnp5OwhQ8YL33iTfClV7zgnIAVezk0u5d16VBWpYl6rrGxfkFNus1vKMy0E8/9RS8deQduHFA07N3GCI3g8NNnPLLhlpd1/AoItJDWSwiGpQMrxMcOOpTVGU1mXxs5OYbBrd83DECxv04XxTn3+Hj5+/NoeXRi5vFcf+RtBAX37lUeQgq81LHcCqgUiyeuAABxgwkKV4UeOqpu/j880+x3W6JtOsCdBTimm/y13mQNHdZKZQUJ8uyxLfffINPP/0Eb775NiErAoBQk/Xn0LGvMy4AXFlH5tfBC3K+saaw8jXfmR6fOFaREZP/YWIe5WM7IQNAKi5mLZExXQhgCUMIGY0QylbhuDyV4bDRWDSgCqfRg0/YZY6i+OTRz0OX+aY1n3/5nMnXmAnBFBEddx773R673Q6LRQUhSZeCqsjS2jUMHiEYwAv0XYvLizMKD6kCQz/ABcrEIakBk7hz1lnS6DACVaEAb7HfXsAYQhi8D0R2DoD1XPyM+nFMbQUgYj2Sa8ZOvr7w/Sdn11NSgHMONrCTDELOTYl+sCgKmygHxhmUJRH5Waws1+Qhgz+vkD0qRM+RxzwcDCA5xNPnOI7FPAU25PcPnxyEHFlj1Of7EB9uT6CzQTfLLF3nHGRcwIdhwOnxMT3ADFLkDpqTRDn26r2DCBIihEQiCiFgt9vhiy++xLvvvouf/fQX8XjTWOKh1LOJRR7GDmdomLT7ycgpK3pg1hpASVxebvHo0SPcuXMHb775FgCBhw8foWlatC1twgyZNk2HYTBUaRGk1Mib3zAMGPoBfRc5Hd5Bao3gKI+aCUyDJV0OBIe+H4gT0g3wIA+RxWninSUjD+AaH+TtH52cptBJ7pFzbjtrgzD3BECqzcKf4+yh+WeVJNW4y8tL7Hd7FFqj0BqmJ6IwVEQ1MC6dQohkmbd9l4wbHxeg7W4Xq5MijQUuLlTE7JU8hCLj4sPwYo70pLGZGRZSZuXHxTTENk/duw4an1vy+Sbzp9FY+I+rhRBGEhyuwsiceEf9GeMjkQeglEzORRs8Tk9v4plnn8f77/4hPicF71jO+ep5r55r3MgIPYkXJgAXHH7/h9/ixRdfgpQKOtZuUVHsLt8Q8wX4Oq/u0M95O4RmHFp4rxos2aYEgTHbJH1hdo5DpNM4FikeFUPFZFggrbcaoxw9e6YArEQAr90WY5EyrlNFBctorRnDIvn95feUE7M5LJ4TGnPji3V2lFIYegPvHLpugBBb1Isai0WNzWYJrymx1FnO1PMQ3sHbgM4baE1OYb3cYDAdgijQD7E6a1RdNtHLl0IA3kH4gHa/xX57AWcGyOCioxDIOON018SdiE+K1wcxDZk
eMk4PoT3j3JgK/gkR05cx9htA0uBaK/R9P1n/ONRx6PjzdYr/zmtUHtKJcGS6Eykl6Z7M1rj8RTIBPvFbeJ12kdDPWX3f154I2Qhh1CTQWkNUQNe2qdPMMCSINF848gGYIHatIWNZbCKVDbDOoWkohHC5vcTf/93f460330a5qMFEsUMdP+9sMjZGMRUeLmyxBYzGjfeU+z20xCs5Pj5G27Z49OgRzs/PU6yMa14wkqFieWtGP+jvVIzNRF1+ljv3kLCOrU9CNXwsNkcCW3t0Han8aVVGLy1OgMDxVRGlhqnEdlWVWCwXODo6mli+bPzlSBJXaOTS3SwklsR2srAE92NRFNBSoTg6QrPbY+g63Hn2WUJd4mBmwSiuBaA0EQV5gkw2ZiEivM5jZJQy9wjQukhxRZXxJYQggS/mnfCCl2ej5OOKjI0pWWvuPc8hxUMoRz7puOWL7zV70dV2ZwXx5m2E/QD8/lugt/9w3xEAXj6FeOEU4aIFfn8PGA4QV2+vgGUBfH7+GPj+dC6FgDQGU39kRggvqlSunGLSy+U6PiMaX889+xy+/Owz7No9ykLBearpwsf4LlRjerPscZJ3r3WBzz/5BH/86ku88fqPYJ2lrJVwFdXMx/b8b/OfPxQq/i5EJvvX5Fbm3wj0hYmBdPUY43GFFGAp7nyc58XUcmPDC0LjrKVQCZP/lCIRMPoMFe2SsW5HHobN0ZscTc6NkPl153OV1hcKoclYGt1alzhuRUlEUDa0tNYolIKsFYbOYLAOzvboui08Arq2QVGv0bYtemNhg0NviLgforaLAKCFwP2zh9hfXpAj7El92cMng+KHPu+8HXJ4QwwtG8s8FBbwimMlBFJdDRTyUVpBKlrf2fHj9ZPRjblIYf57vmbNHSwAEPLqukdJ7WOIjAuzMXF4/hwP8dcetz1BbRTy2gVC8hx4A+u6DpeXl3DWocqUHbnlm868Xoc1DoM1GAaOO9EGPQwD3n3vPXz++ed4ffEGhIiFpmYCTNfBpUHIZPgAIwQlJZUJJjEgsnAJrWhxdHSM09MbOD+/RNcNsNZjt2tgLRVXKwpNsFKs8mqtw35P2SvOBazXR2gaIo5yjntRlOgjqsPZKd6RZ28MGRp9z9wAChcJ8AIedQnAi06UhK80VuslETpjyCMEKiDHoYrFYgEhBHa7HXa7HZRSsWhanYwRRh3YGJmnWfW2Q3CkhrpcLnFycgpnLbSQVOwoGpdKkRCXjyJCbMSwBayUSogOcUZMzOIhgwdSRLXQaMnLUdNCSjkJC81JnPlCm65djqI2ORScT8x8scwNmXwi5WjbfPwegsyvtGePIP9PPwcuOohlifDWbYT/xzvAbvgP/44QwC+fhvyfvwL0DlACeP4E4b/6AOgy4+Skhvw//wrhnfsIn59//zUjjrUwNbrykCcbwuPcC/BuXLiZ0Nv3Heze4OjoBHeffgYffvA+rJzWs6FbmRIOr72meK6QeAQBgzX4wx9+j9dffz1yn8oJTyn/br5WJI8yS4c+9HPerjMsrrx/7b2MhtuVt8FjNr/uObQ9Hj4IGSEiegkEKJEZ2+nDHgrEk1DSJUODpQSUilflBSA9vGfDbhomyu9z7vABU+4Gz2UAEw4BApU9IKIq0LYdzs/PsVxVWC4X6TtFqVAojWAsFCSUBWxwaPZbbPcNLrc9NoKc0rbtMUTF5b4fEJxFWWjUJYklnj98gLbZQ4qEW1AWDEKsCkvWX4hWYOp3NnAx3c++r7FD6mIKLafOBsRilAgIwiL4uMYZCUgGq6ZjdP7i9YqNjRz5YCJoblQIIQAxfY654ZC/Pzd08+fN18NGUE4u/b72RKmv7GGLcTZguVwm2H2xWEwuljeHXMqZrV1jDHWCHPP1lVKo6xqr1QreOZyfn+ODDz7Ay6+9CmAkBOYdcQguF0JAaJkmLYUzJURQAGixW66WkSMgYB1li9T1AkJIXFxc4Pz8HGdnZ2iaJoUWqorKBPd9jyKQ5O4+alE453FxfpFCE0zKoZxvH2WDiT/hbIAxNkqHm/ggxxQ19gIQs1ToPgK0Vlgul1itF0nVtF7UaBoKj3A/s1S691Qc6+bNmwAovLLf7wEgMZNzS5m9IiYaCQAmCp29/OKLJNpjHRx8gteUIDVRpYl8hmyh1Eqhz4hNOd+i700aE0qOMCtPJI5ZFgVpsJRlOZkY/LmDm74QaUPK67/kgnC5YcHv5+8lhGxGHJsbMNe24wryX/0U/r/5BPgfvwRqDfl//BnwNy8g/L8+GqUDn/Q7JzXkXz4P/1/+DvjyAjiuIP7Vz4C37wB/+xV9ZlFA/u9/DPH0EcKHj777esHPjY0NIISpF8uLjPMj34ARDSqdS4ssh+SGYYXBDKgqg+dfeBFff/019vtLKF2CU2zzzev7vaUxjECf99CFwheffYpPP/0EL73yOm1qGLlj3OYGTb4Wzd/PP/84bb7pIvbO9L1sU86ciMk3ZsjGFbRl8v4U1eCWQ+jpODGThzlWvJ4ZYyOKGTeOFI7xEGJ67jk5ez7+R9TXpXmTGyj0HQWuyaO0JiMncMkGSv2UuQo1ABkcykKiqku4oGCcRO8EFlWB4B2s7WHtgH6wMQPQwFsyOFblCmcPH+Lhw/uwxqAUhHgnszWOwcAPLD2nMPv5/c9+Pr6sNUCIhOpst3XOwUuRND64Xo2fqceGECZZfXMD4pCxx88hFz4kpHcMfeUGAycCzBGSQ+fISfSM8v/pjI1oUQkgaqcLdAOFHowx6LsOR0dHdEGBmcdZPC+MsJ1UpFQnRUC9WKAWgtAA///j7k+aLEmSNEHsk0W3t9jm7uZL7JFbZGVWZteehekFGFANaA5zAM0ZV/wh3AAiHEHUhAOIQIRDg6inp6qpBtRV3ZVbREaER4S7x+a7bW/VRRYcWFhUVO2Zh7tnVWEwmmlh5s/s6VMVFWFh/vjjjx1gyYicn5/DOY8vv7yP7WYN74YGYdcD5p+995Cph+aDl45QMaMFJhNSxdwC6FoTZWrPz8/x6OEjbLYbIkwGAqcM3fDahjgZUmk0QWtDBqSnM10S/VHr77ptSMo8CNkYQ/LoXUd5U9roelheJLlyAAHaIliTqk7m2Nvbg/OBgyHoUTL5EyDSZ13XcTPebrdxUvHGmpamMbs5VciER8zJKaVQ5DmMMSiyDKbtm+wJD2JytzQBZaajsTEuNJgLn1cUJZbrNbbbGk3bUdmvVNAqgw4/ZzqD1hmyLI+l02Vw8i6hCuIK2HuHY5Au3nGkkG6oYy7QuIIldXZeeFyfAlUGfPyUnIRtB3/vDPjeEaEQu5yNV3nPuoX7f/4OeLJC6IdN37skJfTTm8B+Bf/gDC+RPxkMn/ehAyYNM40hAt9AAp6kWeEdYJ2H1FypRugdC7HN53MY0+Hw8BBH165hvVyEbpzDKDn59J3XxGlQMi4enjUMHJEZP/74I3z/+9+Hcx2EGEZd/fRIUQILbs4I4NL3+LkvYVCvuo/dzoYAxiJR6W2nQRP67a7Xz/BBSlqCy16jHH6wOfy5lsmDwECcKf7eWjSNA5DHDYgCHA
fKRlLAxgRCoCenpw5R2pCTj5SzMeB+gNJcNlTl6YzGvK63sWGc95RusQCUDCrJWkEKBeEkYCWsBRarNZp6C2McNqsVlsslvCWtfmMNnLN4+vQRLs5PAN+BdC8scTkEImKROr3xcbzQ8R06IZfniID1DlIpeGOhEluhtIbXCs5L2BCkWku9iKQcpqb4GlI0gj6dgwFyvJnMzfaLEWOyZQBraae2jVPW4/NzcDa0hRJKpa9JsCL2yxyvgWxwwy2SiLXGoJxWWK/W2D+kfhvbtoEUkqLdLKO0h+mgMg1ICRWMdCQvTkus1ytIoSL3YTKZot4aFJlC19Z4+O3X+Or+Pbz/ve/DQIR0RoayLOB50YH1D3qUwwWRlP7yA/lJeijmBijiGHiQBG1WTnC+WGG12aLebkkD3oUutUJieXqC9cUCwnoszy9CS/QMsW5dEVKSFSU6Y9B0Bp116IwDoACvYI0NDN9+I+PrAADvDCkmKhXU7xwkBGbTeZAOr+AtpYKUyNA0bUwx8Lg656g9vPeU0wyt26uyhJYK2+0Wm9U6aljkYSwAoO1adEFPpOsaLC8ucPv2bXhnkWUq5MW5C2yQqO9CmbG1kN4hLwpYT/X+eV5ivdmgsy2EVOgMLS6hMnQeUFJDZhUgS6isgFIVlCogRI9aFEVB3SFlYHoHsrCxpJkCTwiQTLgiemRU04ULjCK/JGLjf6dVQEVRRIiSuTjfiWzsFfCLGgjIFTzgn68g/vAmaRZ3OxCZV3lPa4FHS/pZAOLP34IwDv6LE3rtjT2I/+4DuH/7G8g/efPF1zo6HAScNxBaQEPFueq4QZOUoQEV4ISCEoaE1EBVI9umhs4n2D88pMqt7RbGWLz7zvs4PznDerWEUjRPiyKPJcZjRzvcGqhpOSEnHgi5dkTRO9PWuH/vM3z54Au8996PqNGWKmGNo9SkoL47gIPzpLfDzgbZj909P8avXQU18/xJoWUpxiqPwZmJTkiKDvnAe1IQ4LRdaGPgaW0J5yCDDaPS1oz+XvJGwCmV/tqU8vDWwltqOOZhQ0+VHoWwVsD7FoBEUQSEz3pI5SPS0POiEPQxeuRDCAGpAGNH0HuCGEkpgVAKKgPpXECRorOjFPXibIn5ZIpC50QU5/WnBKwSgPLB4bLQAMpSYrt1mOQKi9NzLJ6foekcoDMoAGUhsFmd4/z5Q/hujVx0sF0D+BaUMKGgljrVJpVRnmTX2XlLnx0ACIW40UpGlyCgICC8gLAApAeUQu0shNY4ODoCVjQus/09tAKwFkSEtdRV1nobkcTU+UkR4ShTENJB/abPXzIEzQ7wMnyJkJZHdMwB4mh0ba+h0TshdN9SDFGUMXdkl5L3VcdrEUR5IAYEIUl13daT9kSmNKA1EDgB5Jwg5tCdpw6OSsvYcVVKDQ8XOBE2GHbqFvj0yWN8+Nvf4q233kHbWUipQi7dx9RGqobHBqG1oxw3R0UeUXTm1q3baOoNtpsaVUnRgzWGLtYhpHiCg2AsLVjHpyPvVgoJL/rsXjyEDIZAA4KcJO9xCcofR9ICIpStUTTBnVlnsxmhEUHi3Yca8zKnjT3tKsqcDI7AYy4PAtb05Vb8e95A2ZByCmOzuMBkMqHuisE4GWNQaOLUZAGugxQwnUFZ5MjyIkZUUhLUKaWEBS2WzhhqDBRq+glOLaACugEhIQVPaI0sI/E4Yw0U+pQIH7s88XFUkjoUwOUNZSwHnyIfaTqH3ytlYHK/6PCAUKEkkQ8pki3mH+k9AMQv3oL4o9tw//a3wKYD9kvI//6nwNZAXJuS49EY4Kc34T9+CpgXoxw+GGDeUBAizbiBJtEXghQ5RZ2BbyBIWTTPC1y7dh0mpPSOjq5hf/8Qdb2FFIwu9GlWHt9Lg5L8RL+mMbFhTuZFjtXFOe7f/wJvvPEuBApYayCTarFLeWghQkND7mlxxdjuRC2GR5oDT6+1j38vjzffx+C60jeCEQ36t/M+OnukQZNDygxMyEZQcHVcKQDQphM7XtBJh2WSAcGy5Ixp7aLj4p2n87ExRM8p4H2Yf2beS2qD2WFPESPaN0K5ZTit6QRaIdC1JupjwPuIxHivaANllIT3EKUxmxboOoPTXMK7BsYEsUlBvZqWp6dYnp1BmI5Kfx0FQzSmAtIDLypg3/3EPcZPl/gew3d6QcrRe3t7OLp+A/iKfjOfz9FJSeXHxgOOqqo608K6PuW8K90Xn1vcUYcH27E0zSWEgJK9imvcaxxid1n+u/Rm/Oj8vD+kKZR/wjRKb9j5gwHENsZSUpMuGUiMTApK9Rb4AnkSdp1BnmfwPpRLlgpN00axk7qusVqt8NFHH+LP/vwXuHb9GN5fjjBSBbVITPLDx+HDf6TSEALIs5wEh549Qb3ewLXk+dqugeAJGW7cOOrQagy1f+emPTQfgjfoBbh+Xilu8EMT0oxazNehJfMuRrGEiI5CVVWYTCaYzWZRpIsnodYaCIaFtfz53iMnBj2c2bZt8NpddNK4QVtaugYgPr/ziwsc37gBoG+CJ0UvzCaFiLnBSIayFttQYovAaIeU8EEIresMTSZBzZrGZCf66jd7/jd/Ps+dq3KXu5yN9Hfj8eYxAjCYQ7xw2eHYFdG+8LioIaY5VYG0lm75+hRYNIC5glz6qu8RgPjLdyD/2x/C/p//E/D1gl4/mgDrDtAS4k/eAG7PAeMgPrgB/9lzwLzAxLKxUiR/nY7LOOqK3z2PTQ+bE0naYjabodk/wHZbY29/Hzdv3sTJ86cwponz7ztTUjsOdgo7a5DnOWzX4e7du/jgg5/h5vGbsJaajjFpma43RMchcgNEQBm/29m46ncpBE3rJ03tDuH2774nRKPlR14H/S6kc5WGUjmkyqCUjD2FHDylKCyTQH3IiSE4In2wmM5lht1pfeWDNeSdg3GMCopLa0gIAet6ZyONdlNVX/4uOCnkiSRPtkRGfSLnPJTUofIO0CqDUIwyEJJKaIumqhYhsa1rLBbnaE9XkMIROuQ8zk5O0dakgkzplR7RQCSA7n6uaTpDXPq76Ab2zy5xPAAqAtA6w9G16zg4POjHxPnQKVZCaEJDtNAoyizKA6SORqqQ3AsYmijtkD7L1OkbXm07eD67bOeYcDomk6bHmA/1XcdrK4jywTAzM/bLssTGrAelTmkVSqrbnupuMEIBAJBEqKQ+GCKe/9Gjx/jVr36Jf/1v/msopaO8dqxmSK6PjZ4XQIQhwAbTwxoHrQEhNPb3D3Dz5i202y3a7RpaeCjhSVkPJIdLOUwNLww6R5K4fN10yQRlQoXWw55ynJBcdWOidPtsNotkWh6f8QJl3YqyLHF4eBhbwfewp43oAzsI7ACm3Xa5GoW9Ue7WWuQF5vM5mqbBYrHAdDqNRCF+Jtz9VgWHh5+DlBIqzyF8X/7Kjic7HE1o6pYXBTWnc+RkGWujFgl4EYs+hcQRGiEuPcktRWZ4MfDv0g2QN580dzn2vq9yNvjnyIQPaSWel+zMps8gty+KiQA8XcM9WgI/vw386iEwyYE39+E+e06Q8qu+RwC4s0eOyMMFsO0g/tW7k
P+7H8L+334FnNX0u8YC90/h/q9nMTsg/w9/BKw7uP/HR9+JagAhhxt+TjcldmRTiJf4WB7CqZgbhqe5tNlsUBQ59vcPsN5sYE2Ht956Cw+//RpPnjxCUajY9DB1dl/m8CDFYVigqWsgy/D82TN8/sXnODq6Ca2yoEQJkAaIBHfn5Mi83zCuhoN3OZeDzTOJ5GkzDX/z0nfC9xMiVoHBZhM+iHhuUkJpCsSE0JBaEzIoSVtGcEdeAXjjCKJ3rLLbl6HucsZ53WutwGqx7DBxlJu+LYXf0yg6DQR5HxisW8niXsRDQ3BAOPjkoCLLNPVEyfNwT5Qap8hewFoDAY+yErh9ax/bzSGatsVq1aAsc8B1WJ6fodnWUM5QeiPIrgomOF/x2MfBCwYba+9oEOcjcTJYih9AZy0OD6/h8Og6XOLMrlZrOK0InRL9s3Po0bd0TMfokPcexnY9guV7knOqBh3tmwe6rifoj5ET/tseIea+NXIQdKXBV4omv8zx2mkUdhQ4+uVByfMctdzGm+GNg5EHVpFkQhGnS8bKZVJKrFYrCEElj7wQPvvsM3z/Bx/g3XffiwPHBiqd5HydUKOBCA5+LwolUJZTHN+4hXt3P4NpGhSVhJakuCcDyYfV+SwEjBOwvp9sUtKCZ40I63gyOLjQDVYIqgjhdAQRX4ctzVPjrZRGVWaYz+eYzWbx/tmrTT1YFRwAFjzje2fuBivNCUEqrnpvDwJ97i8tJ00ntXMOm80G169fj8qkQovYB0eHZ88Cbgynek+CL5pZzlpBWWLft21LdeehLBVAnMwkRNRXotAhQlfaKebzeRyr8fNOnQK+lrQ0K3Xk0s0hXShpioUNaLqIeez5851zEea98li38P/uMypj/elNoNTw3y6AXz68OifyovdkCuLP3wLe2oP/f30CTAvI//4PgdpA/tUP6JzbDv5vv4T/6AntDGFY/LcLKoe13+1o0DgxWhHIgS68CG6eRc8nmH9IEVrRSxXIihbeWrRth6ZuMZ1UONg/QNu0ODw8wttvv43T02e7jeMrHN4T/8paiyynUvbPP7+LH/7gx7hx4yas7VO0gkvhhQSJV9H9sC14mc8ajtHluUQbU/i95Cj+Fe6HPmjwed4DAoReCCmhNZXIe0hIpaF0BhW4GiIgIMoJcvuchTMGzpo4b1N0ahcZmlBOTpUI7Irr+xRMsNnq8q6dBpuDZxvQ4PBXYDKvMRb1doumaWO6Jc/ycK+9DACZXxH6RjkY45FlFd56+xacV/jqq6colcLFyRlWiwWcMYAx0GEDED4EhIFsixfMu/hc09cAUOWVjLfjQfsF/1sIgawscf34JuZ7BzBNjxBu6wYupKokFL1PAFb0zsZV6ANfExGsSY6eEGJ68FplEZkUQsTA1Zi0sZq9NBeGCIkNwzJ0qFN7usuGvuh4rdJX54aM13STyrIMUqnYcS/N8wMMrRIRbDKZIMsrGNNhu93Ex+m8iyWO1jpMJhM8f/4c3gt8/vnn+Pjjj/HOO+/GQUvRlfTmvfcQOyFviSwrKH8nJbTKMZvu49mzZ1g8P4G8dkgpFB/6MFhyIGxg3FOQpEjxVFIErrSGzkiIy1guzTVQykEIcrL29vZidH52dhadjV0TKc8z7M33olgXVbsM65vTagzn+94z7FwwyrHdbqPsuHMO6/U6NMmiZ1CWZez2ypOP32+MwWQyic9NSxWh1i5ob0hBDdvoeZkg0StDeaSFUBmUANraxBLgLCfeScqHSJsD0ThQiiXLMszns9DzpXcAxtESP/NdBKar0I1dCyVFL/jgiI/nWzzfy6yzbxdw/5e/A27vEZfi0YKQh9d5jwD8v/uUnOjGAEUG+3/6n4AyWcrG9aTR9L7++t6AC/Cig2w5pVC8c3FzCFvzaNwkSQN56kkiIUKajTY774FtXaPIcxRFGWSYM7z55lv44ou7OD05iaRe4NUcDppzFkJJ6GCPlNJ4/OgRHj3+Ftev3yBEQAqQUmYfgTKpHGCCaJr6uHykqaNdv0sj0n4cX96Bie/vMYTwvRdeUoEnpJWCVBk88510Bi0EkT8FYA09M+9saKceyKF22EBrl7PhQ3oY8HHDkYPI3l9aTzQ2Qx4Vr0UONvnzOHUCuB498x4+ImE1Vqs19kPnaoCqnTx8TBt4QfKzQkjoTKAL6NXefoXvf+8t5KrEyZMn+PL0OTbrFaqiRBu4duxoICg3X/XcU7tMSGzyu2Qt0KlEeE5hnATghcD1Gzdw49YdzA8OkJ336aSiKGF4+lkAzoemcCaRSt89D4cOQFpZkjgl8W/73+d535k4dTLSf/eoIgcZaedyysOxMwtcfY27jtdCNpi3wTwMTqEwVF+VJVzXK2dyhM2TjkmJtPEY8KTLsuBgGAcZEImiKGKH0q4zWC6XuHfvHjabTUQ80qh2WL4j4USQpE0elHPcEdJBBQhsPt8nQTHR51y11phIjfW2BvfAyLMMCHwKhRxFEXQeIJCW0AFcKqRQlRN4D2RFjjzPqWJnux1cE29uSilUVYWD/X3MprPoVKSkz6ZpUJYlZrNZJIGxOh2nZ/g62MlIIVIpFWazIj6vxWIRN33euM/Pz/H8+fNB99iqqgCPvvle00CxU8H190oiLwpolcFDwDiSD67rBtu6RmsJ1XDeQ0gVm8LR55P8eu9EaFQlpW9IhKyLjlQK46Xw/tjjTuFtNn5jp4P/zWkmRuRSWDzlA41TCC91nNX09SrHrvd4ANtErKtrgc9PXu58u1RFrzrYaHlExCm917TskdaTI2RDkSqhtYa6+OqMencYh+22DnagQFVV2Nvfw/e//338p2fPoAOHKQ9O6/h40TBzdRJddniWtsWHH/4a77/3PubzA0BQQIRYPQGw4aRd5DKilZ5veC3971L0K/KZZC8keNVl08dKpHwY/gVVBPf/piiWNjGhMkBKCJUhL0pIzVFs+DQf5Ma9g+lamI5IkVS6ZgabTIqCjtcEEBSSFTvV/Bn0LHonf0hwHwd7abq3r+xKeS2EB3hPBF4BIpqenJyiKkpMJlM0TYc8p81UBpQFQRLfOGqa6XzYWL2D1hJvvnUThXY4eXyEbycTrM5PKM3tJJwn7SBGg533sUFkTIMJMZjjY+dKSmpuByEAKQNyFuaEVETaryq8/70f4NqNY5RVhalxAEjfaD7fhwv6It5YeOthvINxJBGQIhA8Nzh1279G7s5VSMg41cGVX/z8B/MOveOZIlaUdvMR3eQZfVUq5kXH6zkbflhGyB/OD0Np2kQY8eDNyFrWmDCxIRiRQ/VgQ+Cjz9+KwPWgQXz8+DGePHmCt956C5PJJDat4QHggTPGAGrIp+WJ7r2HFEBeFnAN8Q3eeftdfHlvC+tA/Tycw2bTQCqFPFcQJsB6eY4yy2L6wDnijBhLXjLJ8IYyIZ3RIpASOqOKkidPnoQUUVpO1gtLTadTVFW/yTN5UwjSBWEiHb9uAxKUaSbcdtFB4YnGrYidc8iTFsPkxPUdabMsw2w2i8/02rVrVBLGKJLoORMiz8lcpPMgNG+q2xY5N32zFp3pUDc1
lR6KkFpJ0idcdaKUHiBUvTcdStSSBXVlNJl4/qnzyUYxHe90waS/4/tP53X6mX2U++qw//8/HLyBxmTWaNPlceJ5xgeLEgkIUu8NEaFzPijk0jY8nUzhXIujoyMcXb+O09NT5EHB9lU4G1dcPYQAHj95jEePH6IsK0iZxxJyisABIThyp/fwNP4u4zlGYNLNNbaZj5fy8pFf/wHp9YQIWlJ+XygNJRWUJgRZSBUifASHIpmjCE6Q6eBDu3VrTdxIeT5fQoO/A1niMth0PK6E+kdIR/o5PmQwvA9qysLBAmhbCk4vLhYkXjidYD6dUh8oJQAS2oSDjRVOUpBQGHH/KK1/585NKP8zbFcLfPKbX8NstjDewTpDc0F4eDt0CNN7T+chjefoIQVkgR1VYwzysoKQpB9yfPMWimoKBwHjBYAegS3KAo47E1sHZ6hvlBE9p4KRZf45JYiS7XKRgzN+Fimxs3cEr77Py6gO3Refi9/DmYz0s/5pnQ3wjbqB4eaJawKqwZ4hEzjTluYxGtW9ce8dhZQkJqGUCKqdBJOulivcvXsXb7/99oCdyw+lJ01aSC2QduYUUkI6DakFbEfiWl1rMZuU+JM/+XM09RJVWaIoSvzqV7+GFy2KPEdTN/C2I9hSKkBRS2Nj6HM7S9UV5EkmTF6VBU6FQlZQqejTp0/jJGDOC6umzmYzzGYzSNlH0+kiXS6XER3ilAmXvNZt0NGoKjjnYuth5lhMp1NyUEwvUc6k3qqq4musyXFwcIA8z9E2LbwPnXp1X0a7rhsIIDqVWZZR2Zan5nwePkrLt8bAshhVgLCV7qtOxpUoXJarswyz2RTz+V6UU08XFE/6MZIx/jldHKmDMSaWAv1Gm+a1x9FaNNivt4D+Z3/EsUOA9ePmzM4dlcQS0hMgdJDxk0JChKgNnvpsOGfBXZM9SH+nyHMcHR3hjTfewNnJSRJc/P6HlALr1QK//e2v8fbbbwOQEDpA8gl6Qd/7911KabwA2Rj/DVdzMTIAIELi37WBJ2cnVENwKosFu9g5zyNxUqoMPq4HANbDuA7OJu3ITYsu2IC+FH1YOTMOGl90pHB9eGVwvvGY7XI4mDfnnY+dVJ1HDERaTwTyxWKBqiohBJBrDe81lFfwsPAgFFVnOZSzsJaqYoXXyPOAdnUOb779Bv71v/lXaFZLfP7Jp/CdhMxzeFgY11CpsOvTDmOHaHDv43EACaoxb0NpDScIMZ/t7WH/6AiddViut1CtgV73Z1gsl4AOKUcICO9Jiw99aiNtKT/+ot9z6mOI1vU/078DMIU8z6LdSoO5cTqFkCuFICUbnxe8j6KXAGJ37pc1gq/pbAxzlHyhqVFnzgXn+lNdfClljLSn+YTaiktu4NQ3CKJIPy3BsdhstyirDp9++in+4i/+IpaDpkJWPerCMFC/gKQQkFrDmI70LyQtYOeBt999D9eu/+9R5jk++ugj/N3f/xcICCwuKM3QBaVUIUQoHaX+Ad77kMeWUFIjy3OQ0IpKiKMKRZXjm2++wWKxQB6UOHmjJlXQvVCBI2NaJI2y2VnjclWGJa0hdIPTG6wUyo7MZDIBgIRQqpAiQABCXxeL6XSKi4sLnJ2d4c6dO0m/kL7qI003pBuECA6e0NR1sjMGddvAcpSsFZW/hgVNkRkr0Q3L6HpnQQQnbBrmVOqpX3YqUsJten+p4xCjz9GmwXMoXYDjSC19/X/ph1LES4r14ki/M6rjg9MBOE+VBZIjNpazD7014NnJC2k/Qd2Ib968iXvTaSyVTp/f73PoTOLLL+/j+fNnuHXzzXDtaQSfbi4vj2yMEdgXOiivg2zwEeedikGMFEFpV2VhkxPhuw8aHA5dWJ+262A7AxMCDPg+/Z06W+k9pV/fNQYibKxcMpwGn+N1k44PozUcObMD6OFhvQcMoJRDU7dYrzeYTidBYIu6TUup4QMnwjoP2znUtYF1ZIOlFsirHMZT2vn6jev44Mc/wfOnz/DsaUtqx9aitRaZGgYSY1Rm4DgNHKrgJsggbyDIgSbFUI1r164DQmBbU7m6kA2KjQJQkiN1cQGviWekhISCgPUerTMxhZk6AOP0B9EOZBz7FMUYc9jCTzGDwIg2/z51avhneiYiKnane3yqGPoq6/S1nY1xtJcuOK01lCBSY9M0cVMa5wiFIJoN1cJn0eOSUqFrTdxsuKxSBG+/rmt8++23+PTTT/Hzn/98cP5Uf0EpBZkNW4pbxz0TfNhIAa00TNfCOo9qNoftOjx++hzzg0N0dQ0sF+jaFipugkQWVUJBZqSsKJ0LzkuGPCtiEzGVFZSyyDTWmxW+/vrrOE4MGbNYV1mWMXJWUsLZHuZnb5Y5GJyeYARjvr8HqYgH07ZUT82NsBDGrG1JZXRSVZBCxmoi7z2RRoMzsliQTgN/jswzeGspnWJdPL8MC4wdJilIi0Q4ic420TkyXUcRHpLFHEpctVKhzK7/PlSmE4mY1rD6JEU5UqcjrVjhudkvIlyat+ncTUnPPKfS84zn/++xlfzP//BhW9hhT3ZtRiIgez3e4+AsORuACdLUgYQWEMI8z3Hjxg3ceeMN3L17N9Ha6B2D7zpEH2xFRMAHmej1ZoNf/eqX+G/+6g45wnGDFuA+LuQQDDfEF222V9k9/j4wwPHnl5spyX4cibiX57gKHAGPVBuEuDEWxnTouhaW158xNB4SVLWSIM5aa0qLOxtazPOGdvXY++B8eh/GWQZnZwe8Pt4n+t8j9HsSMbUGIKDQtHbrpsFiscRkUmFvOkWWqxhwIJBRvXEQUFCs5qkyyNwDwkMoAZ1rOGtw56038c5772G1XmBxcQZ20hyi4OolB2OcFlLJPiIFU0TJ4YCgPSXLS8wP9jGdz+GEh207OEN7Q9tpACUAYL1Zw0oR+r4AwpGj1XkXx4XHjGXie84Ec0p0tINpH5S0PLUPwkRAkIftGfqsACMpBiSd7uEsdqAp5tIc/ydNo6QLiyctkxuttTDOUSfU7ZoGGpSjatsOADXcirW7ELSpGgeZaShJSpv9QHFUSnlfpSWc7bBaXeDj332IH//4A2SZRjWZINXwh5CQmqBehnt66I7USz086tYEmFjDCzKCF8slvMxx687bePzoEQovILY1bGYDLOzhlYHsHBwEpNAkiy3p+oXSkFIgyzMIKYl9nyl8evcTLBcLZAEtKKsKSkrMZnNMphN459G2DS2EYBBS9IMmbIeiLAlGDS2SVabQtC2cpfRFVU0AT6Snrg3t3IXApJoSqco6eEdOCaEkOfb399E0DZ4+fYr1aoW9+R4m1YRkjkE5Vel6WfVMZXDBA9Y6gwya+bDU5bVtSXDGWUfORpjEfR8HSSiTJOVQrQtonVPKSdEC1hlpbxRliSIQRXtkg+vAFRku0Xv0vCjTSCDNNWqt6FpG6UBOCXjOIwd5wzHSwU6Q5z/8X+DBkstCD43KC3O0cYNkiLZ3yIRQ8FBwQeNAaQnrLFTeYrrncefNt/Dgywf8QQl+IoYfAMZT0leS7x6kZqoA07WQUuPu3bv4sz/9X+HG9dvog7IguR6
u+SpfYJdDEa9mR1SXbk7xuliB9dKHjNEEJM5J4ARIGdE/Rkt57rNPIByVKlrbwZqWRAltB2NaGNuSkwUBqXJkOocMBG0qgwhpHo8glY3eIUu/fChxdqTwTONBqEa/VtKffe+octUFEEuBaR159ChWiN69h3SAdoA3Dpu6xXK1hbnh4ISAlwJWAN6Tc5TnGbJcoqok4MkJU7lEY2o0psZsr0Kta+xdm+Gt772FLx58hpPTZ/DeIMtyak7nAa4AErJ/dpEiAEauk+cc+ppxCS6kgHUO89kU128eoygngFKAygGhiDifcDZIJRlAsCs2sp2oCy7NB3IwUrYIoykkSY6g3MtmiLtcI/6NUqzBIojzImUkVEvnAWHgui44fxLSK3gvKVXlHU1dS1VAzgWBQR4IXg//VGkUWg9iIHQkhIhRcl3XFI0LgSzP0YkOy+USJ2en0Fpjb28PRdhsAfLGpVQwxkEp8uJIhKf3grkDIFWteHRdi+3G4f79L/Dk8bf4/g9+RFyNPA9lqVSC1FkP5Ty8HUYaLi3ZEQIeVKcsPCClhcoqVJN9zAxwWxU4ef4cXdehrmtCCJoWXhpoxQiNTr7Iu84yDZ1RHruoSmzXS3zz1TcU/QUtiaoosbe3F3UurCWj4L0P5Wo9LMkt2A+PDuG8x7apwWVlWZZBtBYGAT41LuhgZEH4jNIr3JXWwyPLNfKigPMe68061ocvl0sICEyqClqSpr7vLKQX0Er3KFGY3F3H3BwR2OS0YRdFhs16i7Zu4BxrcdDWwSgGldNpSJlD6RwyOBrU+4LSLlIp7O3tYzqbxVI3FvsirgCjWGQUUyhwWMqVHuJSymTIE0JIv/ViREPYvGfTtx9cw+L/+MeDaM45Th942jDCPOun4PA1RoU8etSPqz84qgmnCugLYQcmkP24pwM8glEYOkj9PVHE0rQWq+Uay9UKddPCOw8bSu8gFAQk2jwo8AqAKybo3i9Hq73xCc48TxEfumt6es5KC3iYMM8BoVrk5Qytcbh28xb2rx3h9PlzcFqG4VoxUPYUyX+DUcZIl8lTNVvnDLRQWK2WuHv3Lo4Oj2OVlJCCQkp4QAkIk541OdUVaYWrHI8URaUh4Wh0aJhpjV8+twCo/40PTltwLqTSwRmn9IkLfq6Ah3fkaJi2QdduYEwNa1t0pgmRKFUUZVkOHSr+ZLB1xnRw3sCH7dajrz6BYMcxINIWAV2VcU4CCKRMkjj3oUra+d6Zk8m4Ci9COoLWLg0XvZ8dLi9Iy0gLic4A67rF6WIJkSl4KZBnbD/IvsLT85dSgsyHRyE1vNKot1ts6w2csrj11k0c3jjA1988QCYk4Dy00PA+6AQlj86JHvHwXDnjU1vi4YWDFyKkdIBqPseNW7dw/eYtZHkBJ2QQ8qJ7JoyZbMpsugejAiQX5jCv955/aOFcTxRFdHjIVlDjytCxWyl4kBKuFR6sSeJhiQ8jgI7tRYLiDdENS2j5QATOBRUdR8KtI8n/Vzle2dkQwZPTmhQ8AURSJit6dl0HqTVsMPqz2QzNtqbc+3QWHDpCPIwxUEnXOaCPQpmDkGp5VFUOa7fIMo3F4gIffvgh/uAnf4j1dosiMVDgnhspmckPO35yvjD8EkKo0Oa8wmy+j/W6RlkK3DjOUddbrFZr6GyDtiDCpLOW6rZD/o46r5L3WBQ5ICwhEBL46MP7WCwWYInw6XQaNS7SihOG8oUQsRyLNhQBFdRSiahJehd101DKBip213SO9C+2220kjGqt0bYtNpsN+dAG1LnVGFRVha5pcXJygvVqjfmMNC28p2ZECJERQ3Yp2pJuaExWYk6OkIKa0lkEzQ1AqKB+qDSUyoID0zfP482d4fSiKHBwcDBo/85OARtqTn3wwc5Z+lp6sBMyZuGP0yWpA5GeO/IOnEd3UKA7KHaeJz1Hv36GOWGg76DJa4kjkjFxi/km7GTyeVxiNFLyK/8b6MuCrQU2NTW8Oj3TWK5WaJsOnSWjBKEghIqNFK0zMbIa30s6Jun9je9XCJU8txCVORuiO4csyzGfz/HGm2/i/PwMzrToNTC+O2wa/4UHwuZHJGVvLO5++gl++MMf49qNW6QFEjYqNsrDLfH1j0t5/qsuEhjMmeRFQmLDpsLjSaRpXiug5+IA6mBKKEbbMiG8QVs3sVpO6zxC7Epng89UyoVW98PPSzclvlZy9MkJ79dJKnpnBunscZl5eqQ8hHRNjFPebddhs61xfnGBrKAKkjLLMJmUyPIMTVMDUFBCIVM9skYIuoSzFnXdoDMd8iLHj3/yY3x1/z5WFwuYrqXGoAg4jA9IBQDpw/370OjME6rAh5QaEhoOxLVQSuPG8TFu3ryJyWQKqXN4KSBlFrkepbEAVuGZqOghp8iZtR5S9igR2YkhgJraWw6a0vmUVvMNuXBDOxarMqUc2JmrfubP/GdzNrwHTEedHXmzIQXQXoOgrCoYa1GHktT5bIaqID4CiWT1nA3vfdTRYKPLE5XPm+YClZSxP0jbtvjkk4/x5OljHB5dC5EoN4Qix0jtmPAvyss6B5RFhevXbmK13KJtTylNonJUJaBk1pchmS4sVHI2qJkTV0gQ+1dpiYuLc9y/fw/ec4v1IlZWpFU6QO9wdV0H+L6MtaoqOHjSuhCCGpIpFUmWriPuRt9SmEi67Pw1DZFbq6qCdRbbZgud8BE2mw1OT08BgSg+xtwRqSVM4IJwpQhXvEgpo/aHc8F4O4dt06CqKngIrDdbULmhDd0VFYRS0YFKe6CkiwNAJM7Ss3G9VLHvWf6RKJtUA/Cmu8vYsYEbOy78O55ru3PNXFqIwd/z312V7+XXxkevZMvGJnQlHUW9/PMw5RPI0Ojr3se8lJTjwigAzY0MZVWg7dqAdgWiW4TN+b6HjkR67Brfq/49HhcfjDhH8EVR4ubN23hw/x6WFx1+78N7SEGRns4zPH3yCF8+uI+jo+sQUsGYNsw/ReX3cYP/Rz6uOGfqbF52zniswsYt086aiQMmXCCqGxjTBSKoCdV/FBAJKZGpLKAaWQzG+vkk+tb04wsWQ+cj3ZRSZ6O/lyGBkT/nqvvnI0XL+rmRygKQknHbzOEmDkYYrNdriK2AEqTPpLIK3jtCxQ2lhNq2hfMU8HVGofUN3n33Pfzxn/4J/vav/wbCZbHhpuIxT/0+TgWBQcj+mlWWQTgNZx1UluHw2nUcH99EWU2o6s5TlaPSXNnB+2bYQ40hUS8M9yPn+rU8rogbj+c40BvbqfGXUv052NaMA6n0511o3lXO48scr9VintMc3HyN+5MAVNVgjMFyTQpwh4eH0Fpju+krJAjucRHdYM9vXAGQilPxTRpLn9c0DXSm8ejRI/z93/8d/jf/9f82QISX2+SOj3G02t8awXla59jfP8KNGzWaxmK1WiDLJITIoBS1wjYdCZZ5IEBtlKMGWFnVoKwKdF2LR48eYrPeYDabDEg8DPmn1wWAepuIQD8Km0XbtsjLggyJ6dAGp0IqBWctFKhlvLUWZVmiLEtkWTYQD0uZyDwGUko0dY3T01NsNhtMJpPYlt
MojF4dL1cEqGf+dSae89bt26FdN4/D47Pywax4YWQKyYAhDH3DkXq1fm83nkMPhgzHj8+NhKUSv7Fy9eIM9z6v2T6+DcAKaxofRaY7slsS9jGsznczx5/AQ/+EM/gK/85S/j/r034yJR1yxP3jLcfeivwOmp1OFN7y0vOn2eSx9NSktwYy+j4ODyYsbpi7RLLN9PeOrWqrVCUzfUlltrMJrCAYBrgox6kC1PHd/OU5Isrt77IOx1GRFIeSbppjUJxzV1A2sdMp2HtUtBK5KhPj29g8l4itV8TlF64vhebWwSI02QQBwDaxnxYSeM/rle1Nu/3mgcYhqlC3nvG5M++kEVMgLwAb3wlEYhRU5aX4RPvoe2vDqOM2xyadQl2PvQgIQRGj5XEfbi21Jldmr4MA6h+kiS0aXSdqpCko4xEc9uS0BhXHt1PgwiiwB55u4wg4eqtZwLMu/eQFzRVK8fCPLvLqng2fedfXMgnZf9/Xfvl7w0j69Ko6T7432k/LH0OCmKmc4RDoJTZ6PvZO07Xvr+TZyR13FYXtnZkEJEsiBf8IsXL2J0OZ/Pce/ePTjnYk6cZcmVUlGgiyFabhFurY2pGM6zcgUEt4LnlAlHpAAiUgKQc8ELPjfFsobgZroJFkVe4NatW3jrjTeQaWqupgSpg1ZVidGAEYQ2okSAerMsgzYGy9WKZLJBHjV3ugS6k7N/k7uLjQ2VAW2FCEDkN3YqUmcmzZHzPtiQx/0lkz1dkNlwMOTtHOc1aePJmZIzt9ttVGkVQsb+FOyAsIHibrwMw6tMwwHxu1waO5vNcH5+jvl8Hgmm7EhwGkMIEREtALGpG6djuCU4RNtNMeX59BeSPM9jBRNzXg4ODrBareI5DIdDLBYL3Lp1KxqApmmQ53l0kNnRYPQCaCs62GmK3WDDnBFCRCdms9mgcHlc1DwcigTZsdYAgQD3/MUz/OZv/iYefOdj/O2/9Xfwmc98Jly/jPcwy+h+VmUVU1SMOrCjysdlJCoSXyGghIppRS6VTtMifF+01phOpxDBCeb3Ojwa1fYDqqoKmdYYjoZYbzYdJ5MdHEpj0LNP1Q7topreu7jepI5QSPf0uSW8pd/laxJCoKlN1EBpaoOiGCDLcozHM4yGExweHmNxMYfwLpYIv3zhZfPIRoMRgVDeHhRbA48VzrZoUH9j48N8Bi/adaCfHkoRHe/5OMEIO+4f4uECJOCtbQ2pdzGl0hpx1p1wfBnx+uL4clrDx3foeqORDo6fFAnC0QIpxjgopUnXR8hAXgXgbTgnHxU9aRdhXQ16MHHXghwhL1vtCgkNbw2cMxCwCMP+0oCvb7xpuPajD1cZ6X2ciX2fT9dj3lK0lPeXIub8Nzsafeem73CnWxr8vmws9jm17fzye7/fR0xeHlzT9noSaPzlJFLiDqEcES8Wi+hk8MIEtJB6+pMiexFJgryoDYdDTCZUdsmseoZ2eUG11nZKH9k4c8UMQP1FMp3h/r37+NxnP4tPvvUWJqNxcBZo4m9WaxwfHccILM8L1HUdqxgYxob3MSK/jvT1so0nUiyVDYsET7gUmWidBBe/k15r2jumv0j20yl96C+FojmFwPom7DW7EO2y88DjnZJApZSBE9NEpIl5OEz8ZINmjMF4PI77Ya4FL65MvHXOYb1eR60MpWRM2/G84QWcj8WGkHkeHPFzimiz2cTePMYY7HY76va72WCxWMQOtKxqy+XV6T1jfgv3jGEOC1fEAIjkRSbAkhNS4/z8jKpico3GVCHy5ftDTaU2mxXeffdd/NIv/RL+7b/9t9GhIMNDInnz+RwQiCW36fxnB5zHOk33iTAnWESP51Wq48GcDV740oUnRXTSMfHewxoLHXhbUsp43uzw0ljmVy5i123ee5ShCo2/X1VVpyM0rwn0TDSo6zKW5TeNCZU8NIZZliPPC0wmM5zeuos8a7Vi9hFi9zzBe19y3iLPszje5W5LNM4EvWH0a/91OlhnO2PEwQJfW2vkQCgGuZAQUgNSQwgFJTNImUGojH5XGaTUxHEgggVxJiz9i7SJ+M8D3sI7Iv4628A5A+8IPfDOBgeFuDpCeOrC6kiriH6SE2BMjaYpUdclmqai/TA3g0tUvQe1ByZyqAR3BCYHBN7G96k5Jn0Xno4nvIPwoZ/LFQ5iP73Qrof7jfa10bvoOhn7Asvr9pOmSFI78DKkor+PNPXcP+5Ntr7N6Dsa+7bL43ez7bVKX20wcLzIc0kh62ek0DN3iH3+/HnUxmCy4GaziQs+gEg+5CiJKx34exGtiEhFWxXD0C4bKr5xk8kEo+EIx0dHmIbKCB8MEbzHeDjCYj7H8dER1qu2YqJuGgglY6daFlpiI2SdvXSjb7Klk4EXSGstjCXGvEwcg9TZYD4CH5PTTSlaEu/RnsnAixU5f4h5/xhRhf3wOC6Xy0gSlcFgsGZDirbwfWFUi0V72JjneR5TbMxn2G63WK/XmE6ncUy4ORsjWd772MyNDehmt8V4PMI2pLDS606vleclcyqYPMrlw1VI1zG3iBGbIstjRMzf5/fTyN97j81mA601ZrMZ1ut1PJ/hcNhJqQgh4rym5msFAI/1OuhOSAkpSXWV4WA2SB999BF+6Zd+Cb/4i7+IH/qhHw7nrdoFDgJ5kcP5Vi+Dz5XRlTQFQtGg6KQ40/nE8zF1UtL9posjzx0WnCOuVkE9HxygfBshsZhYSjJ2zsFb/wqkhratQTpX2PFhdIc5NM6xc64DUsOogoewAllWINOUSjk6PsZ4PMa8KqPTlUaXN934mSDuAM3Fh48e4nMzxPPjFBw7c/uMn/CuRUZEq62TOoVCMLISpLlFUHWNhFEXKjoo5SBBpcZeEEnYc3Oza0plWoeHuBFChMRFBxVo709EeTj/0eZBCE0NyIVQMqAibaO3dC0CfEjZtOki7jdESJEITnpAkzztyyOtWNm/7XcqEMdgXxTPr+9Lj+zbug5HuAdXGOir0iN9JOOq47xK6uO6/fCWzsmXpV2ue23f9lqN2ADEhSOF+hhiZ84Gn/Tx8XE02hw5cnTFTHE+6VQwaDKZxIUMABaLRYTd2YjxAsTCXt45jIM0+mg0wng4oLI3PqdAtmyqGpPxGGXgkHBlC1+X1hq7ivL32902GiuuQLjuhly3pRORjJlC09SQilXraONIn28+V1GkUGwKszFI1ScYpTBc6skjOQ4vYOzUAOT4zedzDAeDUEbYdvvl6D4tq2QezXa3A+o2R8/3OcuyDteGBLImEUVgpyJFP1rSaUCB8jw29kodTX5A02oJ5myk5GF2JNmQFkWBsiyjAul0Oo06MDz+zO1gxyZNOQGIjvAmpA3Se8tztapKqEzF89lsNpSyC6iJNazkiegQKK0gBT0v//gf/2OUZY0f/uEfjlG61jqigRDtc8OOXVvF0iKAfF1VTQ4Ec2FSVIkdHU5tsSPJY8zOB7cN4PH2nmS/tVLwSkJIC8tqkZIk5J1zyIKzsdvtLhmum2yMmnHate3s7GLXaJoXMqQoNaRQcI71RQABCR3Ev7TSmI5nODo6wnq56Dw
/r/p8e+egcomqamLzx48//ggXd2Y4uXU3Inn9qLezWIf0RopUcjDVdzboe1zRE1ITEhBCkcaGdxBCgmjxHgI1PFQICFJo/npSpffscFwmH9N7ZFTZ/Qm0jvaPsMngCDgbHB+fRtA9vQ50DTJ3FG7/sRMTdERCBU77vZsZSnqdnYL9iMS+oDJ1DNLP9pENHx2u/efCjue+1MtV590/Rn/Ovsr2OnbsOofpuu2VnQ3nHOqKohluka2Uiq3gx+MxVqsV7t69G8ll3CcFaCs6lFaQSkFnpB2ABL5hiJa/Y4yJnV6HgwFWoRx2u90mwlwSh4eHOD09xWQyiQZ6VFDevq4qjII2h6lJ3XKz2WBYDGKJbKY1dJZBSAkXtB5Wa6rLh6SFfbVawzkf1eheJ5XCDy6NhYK3EoNBTqqTdasYyNE3R5Dp2KSqprQwXd2inLf29S63hCNx1rfgsS2KAmVVIQ+6EEzuZYeLkQc2PAcHByirsnN8XiC5EuDWrVt4/vx5LI0VIdIeDocdrgobSUY9hBAYDYdYbrbRKKbwOX8mNZSMcKQOAZ+zUgqLxaKDRKS9eKSUsYsxOz5StmWyPA7L5TKOMXM0mKzK6YimqVFVZUBaynCuQFXROKrQN4WcJBMc9wrTyQyb9Rrr9Rq//Mv/DMvlEj/2Yz+G4+NbkFKiyHOsVus4l3i8GJZ1jsqAtdYYDIZomhrb3Q5KZyGHrgJ/h3PKDk1jY3olRRLThRVAx/Fih8+YGmXZRAfDJ/OP0UylG5R1jSLP4aom7PMlD1EIkp0jnkDTNKjKKl4nO6yR2Bm+RFQDD6EklACsJS2GNh2ZI88HmE6nOD09xfOnTyKyetNcdGumiD9BDnh7SRdnZ/j2t5/gx07vIss0XEJmTcfT+zbS5+eTXidCMPUmatcBRhngqWpEAOF7hGXoIBLonIVSFs55wAo40UDYFC4H9hnDvde6J4qOYyREJPBy1Yn3qcFuy+6JmyKAThdYQimkVBCydSb4mlISpxDMk+MxS52Mq7ernY3LCEPfCDOakn7mJshGOr/TgK+PRKc/96ErVzklvL/rigVetqVp+33Hf9n254ZseJAwEXMY2CEYjUYRNnfWoipLbNcbDEfDmLcWUqKsKwgpsatrOAmUTQPTVJiMJyB+hcWuLFGVJW7dugUPYDweoalrOGuwWMzRVFUU58qyDCd3bgeF0VHoyyIo0qwrbE2DIsswKgZx/wCVdt46uYXHjx+RcyIFdJEDSmBXlnDeo64soBQJLQ0K7Koau6oO8rUqEqR4ceB//UWZPX/vLFRQCCUxGvLipaaGUdZ7WKJHxUg15WfwJE2j0RYmR1zo+B/lTtvJTvO7JZilkFkayTOK4ZwL99NFY8FlwOwEMZlQCNJGmc0OcLGYx4iYe5ZsNptoqNPeG4yQ8OdTkmOqLuq9R5bnUFKhdhQ5KqngVYs4MAoBEPs/1RJhB4Odn1Q/Y7PZQCsFl7lY3dSvruJz4HHZp6bK/I9+BKq0xm5bwjkPYx2EVMgkjXVZ1hgORhgUA4yGIwACjaE0AYu8aZ1htZzjV3/1l7FYnOMv/+W/gtM7t7FcLyClxGx2AO+pIqdKOCtZliMvCjjnsd5s4D2gFbX1lgLwjnRAGE5Xiua0NZQW1UrGdBgLnnEVC2umdKNdip99MDTsAFAJKrUeaOoGuVIYFgXqxqIxJhondnoQ6jW5wsEni3VjPQoPQCo467EuNyh3O+JJKBnQdwcv2bA1wVgrSCXgaxdJxnk+QFGMkOdDHB4e4+DgEI8fPUKWhxJ0RdxI0JmF56iTN4hGk6pIZJDhlrHKQ8Ljg/ffxV/8ob+ITGuYuoHUWWiVQERQGjsHL6i3jJCkZUQpMwXvJXFNsgJK1aibMjpUEgrCBRQkdF4Xku6zB7VhgJVwwgJCQ/oCxgt4JSjdYg2NLxu6SNoIO+Kr3eNgdAx0+B7PAu8DDyNUtNAnGUGNX6L/Ubc3COWCoxFejnsDnDfxLlBJOI2uVAIeMiJJLaSSrsNdR2Hf9ey9pt56ftmmis7rqQ3gY7RO5P7tKsfzOi5GGsilx0nf23eMq1LO+4LTfft8VdSkv71W6SsfnFUaOcXAC16WZSjyHA3zKDYbbDYbTGYzQAgIKWC8xWgyoftlG1TlLop6TSdjHMymUWDp7MXz1tA5j2GeoxgOcfv2Ke7fu49duQuLKCC9h3AemZCwwbkwroYUba7Ze1J6fP7iOW7fuYOzszMq8axL5MMCqshgqhpNXQNCIBsUsM6jbhyo7lwRkiBu7k0SPO6DV+6RZxpaU0dNpVXgZJHn4oUgff+AaLDRZx5BfzK1BNHW0WBYMkKcMdzqQpTp5OT8OpcUc5UQa1Rst9voKLDyKBk1QlvyPMfBwQzr7SZGvEzgdc7h4uICWZbh/v37ePjwIYQQODw87DgazAnhvPxoNIL3xJHw3iNXGlZnqBsfnRweE+qBE8ZSCoquvY8cDFbZVEphPp9jOp1isVi0DdHCWPPx+LzSrrLc5TgdJx5D4i0MkOd5dMaNMXQvpUZtLVxwdpWUGBRDDAcK49EIw8EQQkhY08B6j8aS455nGt47lGUFnWf4zd/6Dbz77rfxs3/1P8BP/uRPwjmPzXYTEccsKGduNxtMdQbnRXByaN5nWkJLBQHqygpQd16t2yaCeZZBawVrasjgeLJDx2kodjbS19JxIEeDNB1UIBmXTQPvHYo8Q9NkyJSCsa0cfFxg2U6xcU9y6s46GEMdaZvGwDuP4WAIknoXkFpAaQljLKooPU/y6NYaSld6ASF1RGeLYoDZ7Ai3bp3i6dOndBxvIDzLaPLCHxbjzqLbjwK77wsl8fzRQzx/+hj333gTraOHEKwk3AURrRYhAVIFg6/IrZAZpNJAI6IBk15CearyEACkEhBKxioVckI8lZlagLgcAtJLKkv1AnBcLtrjSfSuLDVa3SUofC804gseQdxHTIvw5SVpD0JoyFGSEBDCdvYax1EG4yfahE0UHOt8sO8UtvclNaT9dFDf0PLr6bV3jG3UiEn3LzsG/fJV8Cm+3HD3zyE958tp8atTLdcd96oUyqVr7e3/pqhff3stZwNAlIfmqJTTKQxn77ZE+hwNR9hsqQJAao28yFGbBuV2CyFr4k4MBpBoF7XNZhP1D7js0TlSc7x1dIzJaITDg8NY+TIcDKOh4MUvksiUokUnk5ErIKWMOhFcGrlcLnF69zaqpoLzbTfN+XxJZa5CYLstAQgopS9N2Ks8yzR1kWdZXCS4moHPhz9LEGPorJt0ouSUge0tznzcFGbrkyfTc+HtqsnSJ61ZazEI95fTZv0GX4xuEPoxwGw2w4sXLyLXgYmZg8EgKnfyeTJngomGzHWQksi5bNwBYLPeQOkMeZFjF5rFNUmnzyYhRiqtqTMouuRGrXWsMuGeO5w+WS9XlJ4IqAajLpyaYW4HK4myI2WMwWQy6Tgr6VzIlCKZbiFgXBgPpSA0ibkZY7DZbih9UTfI8wxFJH4GAqYW2G23yIsC33n4AH/v7/3f8fDhQ/zIj/wIPvWJz2C1XMFbCwuSMz
86OKCSz9AmfpDlIKjaJedG8LQxDaw1Mf3DCr9UcdXlcjAaxM4Hz6WUX7MP1uVghDlag6LAYGDQJPwb5hik0DMR8dq5yQ7RZrOBH1DzvExreG8poJDkWHgPQr7gO+qzKcEVQBASHGEyHuPWrduYzQ6wWFwk53GzjVUfLm8ey+UC7777bdy+cxdacxXP/v1EE9WPloMxY54Xpw68cPBCkUOhg9OoRQhaAIkM1ntI7wDbkMqnC1RS52ClgDOEggrvQtDD6Zwrrl+wMFjvzMVlQ3QVxE/3yCVBUrf0snO4PYhxut999+kmzkQ/4k/LrFODvm/z2G/cb/Ld9POX9rvHeUhJ/Pu+2//OVc5M30G5ybH/LLfXFPVqIWnWXGBNBJIUzgDnkeVZq4qpFNarJXSAoSUERoMhGqngTIPdrozcDGb5c5rm/v370Frj7t271GlUaVhjYhnjcrmMkD6AzsKSKpvyxsaMOnxOYhTunIcHafmvVhucnZ0HhUxqJGZtVzUUuOz9pq/1J4UMXrzWGuPhMJADDXVLlTJyT6QguJCdNzbI/eqTdFKlkSeAjoFIt30wYbqxg8EPsnMO0gNq2KppcnqDu/ICiK8VBemYpCqwfBxrLc7Pz2OqIU1PAMTR4b4paYqIlT+tXdLrou0ZkXrnkSwr2uvQAcngdBQbRkYutttt1N7gMWexr9QwsXNNaYtZPMf+fU8dQT5H67oVP8450kEANV+zzoBum8dkMsZoNIDzFpvNFnVVIi9yFKEUeb3eoBgMYY3Bv/iVX8G3vvlN/NX/4Ofx2bc/F5wHgclkjPU6IDPOQ0qFosgAEH/EmCYahvT8+dqFEC1iowSsbStOmJCZSs6n18VIUnpfefx5PKuqggraJyqkYWluy5jqC09QZ86yE5JWyfA8EUJE3Zyyog67DoAJgQejGHxveM4MBgPYyQSrxRCz6QEODg4xn18kzwj+1Ju1Fg8efIT1Zo3jo1toGotQz8lHSX56BE1uUCqAX2dnOQ9BR0PvCQEnPJQUEEpAaAmhNKV1AkqqwW3UFSFrWkBAwksDYUNyw9WAF3Cex1nG4+7drnpZtCWdQLdUN/0yzZnu/U0j9/7G96xPhty/5u5ff6+Lxvvr4kuRByR9ZgLaEtFpz1yYa0fw+v1fgWBc93n+eZ3zdZ1D0d/H66AX122v7GyosNgKIaKuAesvMKKQ53ksQ9qGPLZSCuPRmBb4khQnd2tCMAol4UIEpkJ+Nc9zaKWR5Rlmsxlm0ymWqxWODw9R7yqKaIKyKDdl4xJcLrlNe3ewk8CfraoKh4eHWC6XsQ/LarNGbS0WywUWiyWqKpCxIILB6Mo8A/tzafx6asylAOA8lJKxv4b3HnmiV+CEgAQtoIoX0KSlPCMzfC1p5M2VOd63FSX7mPX8c99ESh2NiJQAELmIeXvWq0g7bAJtV19GBlgOnFNj1tqoHmqtxfHxcdSo4H2mTiE7nixzLqXEcDRCXRvUdUscdmjHPJYI+iBdH+rx2cFIm85xb52DgwOaQ8FRVkpFWfXnz5/j3r17kZfEVTU8hqwfwlow7EQx2hFRNmNgk86abHiFAKSiBoEsde28gbE1skxiMNCwTkEqDe+osdZsOoXSGRpjUJY7fPjB+/i//Nf/Nf7SX/pRfPnLX8bp6R1sNxsUBelIuJDKIAfAwTpSIWUDzBUdfM/ZaU3HtG1whc41pAsYp8A4nZI6LjxPUh6OkhJZnsX0IO0/OM5XlBUS5N5bTCOaZ+l5tw7GUZDiRIuWphVK/esYjUY4PDhCtdvh7p37ePLkCapqm6QD/nSblMCz50/x8Dsf4+T4hDrBXmpxxO5V2ruGlDuprTugFKF1SmZhvCy88JRi4H8JEoJ0HD2ZQyVzkuaQgPcyvu6lo3RKMJTtEF9lcPrjkqKmyVUJCt76m/OtImxagn3V1k8hXO9ItCb+qvWuj5L037uJs9H6iFcjOQqiOyAv2X/q6KTITT+A7aPq/evZd703IZFGdP3P2NEA/hTIBpeZsfQ4lw1ydFmbBgcHB1FG/OLigqpSnIOpG+RaUxlqlqOpK+gQNR0dHWE0GmE6nXaqA9arNYoshzOkvsiL5eHhYZSV5lLDFNngxSZ1RLi5GosxsZhY1dR48uwpNluK6gh5cDCGGPu0Ty4l5bzp/txW6uELQQI11tTQWR5FsLhSgqM00Xn42lJJAPGz/LkUyuZokrkbzBlgPYmrJk7/IeR9pVE4RCvfzGOYRvSMcjFxUgjKWR8dHcF7j4cPHwJoOwTzeaf3abVaRUl71mXh1IhSKo4XpS2aSM7kc4g9UVJipqQyTBXGiFM5PC+qqoodhCeTCWxIjXBn4eVyiaOjo5hmY9SGx42PmyqgMqfGOReRETaQpHHQRvlhKaSoyFtIqalvigOEcCH/Ts4IvIf1DlII7MotjHUoimGcw3VV4Td+/dfx8Ucf4Wd+5mfwhXe+CC0pim1Ti61w22A0iMafy43Z0WBtEBZdI46LjmXf1tpYOdRHwdJSbUYcUhSCy1PZucl0i47RfQvoFFJiYhd58YmGhQlEdOQ58lzDCYG6qlHVZSBUtnM1JQczShMR2TwHLHHLbt++E9J6JaTw8Je8gi7n6UabcNisl3j06AG+53u/D62U+WVhrxYVaI0lORseWSZJN0QpCEPpIghQ4zbh4UDzBJ5IyADRQLz3QBDu4nSFkILSKVKQ0JdInRtCWMLj3736a1MDAMX7EqSmys/bHscRCsxR2WdA+1v6erpuXZ2OwKX3+1H9dce6KZKQnkd//QxvvPR6eLsqAOy/v++abrL1z3nf+/vQoj+r7bUasQEEP3L5KEP9nNeG97G3yXa7jZA15+u1UmgqIuQNBwNMj45wOJvh5OQEy9UKk8kEu+0Ww8EAJmhLzEJLb+8s6lACNhgMsFxS627mjTCZEmilzOfzOW7duoWqqqIMdwp3M0rz+NHjSCrzHrCG4TByLIgZDhBoJkMDxcuNc9L9tjdOQiqF48Mj5DqDdQ6Zor4RSkgYa0N7FEqnGOeCPHe7OLLDwYs5G2kmI3LUzTA2gGgU+GHgiDMVa+L7mr7GsCXzCFiMCUArRGZMTJ0x8dIYg9rQ5w8PDyP/hp0NADH65xQLn+92u+20oWcuBDtRz5+/gHNUlaI9VbNAtE3G+tCjkN226Yz88GvMN2LUhRoMZhH5YkSE5xeXzq7X64iQsQGfTqfRyUi/xxUZ1gHOenipkGWC0BdrUBQ5lJYYjmjOusZgOMxRlhuS0pcaXkpoCBjrkWdULi4EMBwUcIbKBefzBd57709wdnaGd955B7/wC/8RTo5PUdcVlKLPF0UOIUUcdwAxVcnOFN9DvqdSSNTBMRyG1F96z7z3bRv3JFXCDho7jdyGIHKPHBGjWaE3nbPc2Za2NOIN4k4ikJmbBiak1UjdMpQVhrJWn1RwCSE6842vV2tN1W1Vjcl4hsPDY9y5czc0/6sh+Pm4jsMQn39yQ7qQPF2PyoBvfuvr+Kmf/gqkyqGkhjHEW6C568KxOJBBdPbTijMpJbTSqEL07oCgqcEllR5SAqTeS
YRaH9AtAQdvSjjXwNg6qII28L4BfFAGDc6cj07DZcN8mQTJIxRSrz4cL3I42D1LPu/bUtZ9BrW/dcXMLn+26yBcb0z5u30npx/596+9874Q8Z8QoQNzqLziKxXh/T7qwHMy3V6W4nkZ8tPfR98JeZmDso//ctV9uanT1t9ey9lwzqJpSJSJ5Y2LouhIVA9DfjTq+nuPpq6p86oHxsMR7pyeYlAMMBzmMZqcTibY7Xa4e+dOUKSkQdhutzg8PISSkvYhJc7OznB0dITNZhOdmpSAyDDuvXv3KCcfbhornXL55na7xXvvv4csJyY/vA9wbXvd9F3R6QfQ5h79pYeAF2tu6mVNg3unp5hMxgHpEJDBaFOqBmEhbVtsp8RGfhj4xnLVDxsKXpBSUbQ+p4B/9klyQJdgytfSTsB0DGSEvZVS2Gw2UUVzu93i8OgQkCI6dWlDNT4mR8g8bmlKiK+fX1utWtImOaNr1OsGDZdZBwIgn1+MXjWVcTrhOkRadqBGo1GM3pumwbyqMBwMo9Ac67ow9L5cLqOjMZvNYvqNHR3uMZKmD9J74+ChBRtG0gagCJMMxC6Ubx4eTjGdjiBxAJ1lWC5XmF8s4YXApBiiagy8l6iNRR2M/GZD6SB44OL8HH/4ta/ho48+wl/7q38dP/ilL5GhzQbw8MFR6UqRM8KxTzDOoU2J8DimfVjYieT5MxqNomHoS4inTpgApWgcRETLuCmb99dHVPwsxNLagKZyy/I8y1AMBvCC0p98fH6eBgNCdpgLtNtsUa53mExmODo8we3Te3j86BEWy3NEHgXaZ77zUm/bH517ON9guVrg4cMH+NSnPgsiWfZlqBlhCGkA3/4uogHlajj6LD1DlN5WEvDOwaEhsrC1MI0JqTQA3sLZEs4aWEef8aBKFGKTtTofKbJC19VeDSMVPqZbWoJCa8ATAyUQF5GXGSaeO9eNbSegEJfTCRG92WPA96EDfUPKf/cj/X1bdER8Qhnt76/32atQmX3Gu78mX3X8m6BDV238XO9zitLz4M/2X7spAvLqaZSwYHCnTyFEJzKJsuKaBGxYIEp40uKYTCY4nM4of+896qYmToS1cfEfjUaYz+cxCmL4vixLFHkBG4Sk8jxv5cRFW3LHUDYvgqn0dFVVuH37Ns7Pz6M2wkcffQStNJrGQOgc9KCniEWrldFOJA9EcZl0eOgPRle45PLw4ADHJ8eQoiXYAm1rek4/9SdL3xtnJ4mvrc+kTj3nVD2078n3nQ2gJZWmcCBFVy3RljkjTBacTCbYbDZg6H293mA8nSDLcux2uyh5vguVDpwaYdSJq3FIC0FGVcrNZhteFxHRic5UuE5qWV7G8fZonY10fPmag6kI0bvFar3GeDQihyLcq0mQtOeUDoCIsLCOBkmtzzoNstJxTdVmAdI8cHAhRQDSeYCHVkTE1EpiMhnhE594Cz/1Ez+BcruCtw2lcVZrfPjRA3z88UMslxs06wpOKMALNIb2eXgww3AwCERmD2ctzp6/wD/8h/8tnj59gp//+b+GJginlWVFDkeYHyzcxc9Jin6Rw9TqVKQoBo8tp0YYOeAUWepI8hh63+qUaK0hVA4XqtBStC01dmFkEWNjT2kMay1MmA95RiJlDgLOW2ShL41xLqnuCKW/QfOHHUoAaKoao/EEs+kR6rrC8fEJprMpFssLQhwSw+nBKMFNIWYPySRg5/Hun3wbn/zkp4Oj4Do+S4sESEJOBTsdApziIGdAQUla44Qn7oMSEtIDsAa2caHCqIGpqdLIhz4m3jckp+7Y2Q8pO0HdZhmJIIQ1PbcuWhOfKU9PFS0Tlxt/XYk+XGM89xnLNGBItzQAS8f8ulRX30he5ZT099v/u8+J4yMD7ex13rV05z3Hu26t558vM+Z/mrTHTR2Zfed+E7Ql3V7Z2Ui9NI4sGFplY75cLiPrvKoqSCFw/+493L19B9YYUg+0FuVmi9FkDKUJGtxut5iMxjEqLo3FcrHELPA3Mq2RaY3Neh0bhfFx2GhztAkgqlveuXMHL168iBoIFxcXEEJgPp/H6ojtdovpwQF2dbcjKqMXQjiKNKKUblDQQVsGlnrE/BCwDPatk2PSN/Au1JUHiW1BHV8JdgvRgpCxgoH3ld5sNqhcussR5j7D159MaYokZeWn95Sdmv49T6tdADLau92OWs7f+19g++bfhnMepWQYmM576hzGrhsxASBIP4U+BbAUpG3gnIcT7UO/CmXAyntMmJwrBabO41boeMvX4C/lmoNpEHw/23ekkMiT6yQWv8QuLLrWOigpsPWegWgIADupsKtfIPs3/6vITQBILE4pFcTlCJnZlTsoXYTzC0RMAQAeh4cHgHf41Kc+iV/4hb+OP/zqV/F7v/Pb0NLj9PQ2vvCFL+I//pv/c3z84BH+1e/+a3z9330Tm00JEyJKFaJcYw1G4yFMk6MsK4xGlN76rd/6LSyXK/zNv/mLEBIYjccw1kZ4l9VgGV1gRzA6FtZDqe6iznONnT9GNplAnDrNHIRwn6O0KsxLAa10NP5MYu0iGyL52YNvXVe2XikFb1UkXTvXOt3s5DIhNj2XLMsxm87QlDWWqzkm4wmmk0NI+RDO+U7EzijtVVD9vk0pCW/Isfjgg/exWC5weHgrcnkAQAgVjyOlDOJWvcgXNK+ZR2NsAx80M+B9EGRr4GyDpt7B2gbW1EQK9h5CWDhpg8oFd4mWAXgIglyCW8GHfiuJY5H+5LGIwQyLcSWG3nMqqLOetu9dte0zfvvg/asj+Kudjasi/xTF2IeW9DfqxOIvXVl0bsXLu5z2j7Ev9fEyVOVlyEt/fy/bTxqsvgzFuIkjlG6vlUbZbrvVA3VdY7VadeBT1lpw1hKcGdjqWYBWhaMox1sHXVAJ4mQywXw+x/HxMc7OzgAA40Diy/Mc1rSdOJn9v91u8cYbb4CbsKXGFKAeH48fP8bBwUFs/pbnOZ48fYLz8/N4TdPplIim+ZCHsnPdguX54qLXeuh9D5ARgMFggHv37gWIO4gKIUC6fANCBNkS7VgIZ3/ejY1AurBzJM/HTp2TFPFIJ0d6zpf5Jb1oxLZOD99j79tmZHVdQ9wt4PJTAG3TbSACz9jf4/Lykt2PDHzvJ9B9iNU1+77pts+08fFk7/XoeAFA0IWIBFXvIzLCTvhkMkFhDXYVpVsEPJSWkALQGTe4K/D5z38Ojx8/xh/90dfwnQcf42BG8uLr9RrL5Rqf/+538LnPvo2qrPHkyXM8P5ujrjdwzkBCoMhzmJoQhpPjIwwGAzSNhXfAe+++i//q//x/wi/8h7+Az37+8zDWQiVoIICOw8pOQl3X0NmgU6nC84ydTHY4+PlmB1uIltfB39VJ2tBaC+upBJidFQoa6uB3du/KvgW1H3FlOgM0cRYgJLJMQLN6sWhl3BnNieRpB/iGyN8AMBiO4nqx3ZoY8QOJUX6F+eW9BQSlHy7Oz/Cd7zzAwcExOQidq6S2A0IwcpKSu9uffK/4c955OOvgnCHnwlZo6i28a+BdA+kdjYlygLTkMHsW0QLgPO95zxjT
/ySPteg6nj7oa6RPaGoA96UEANA5X2Ekr0ov9Pe97zvXGb99yEH6Xr/6Y19qgTfnXTx/HpfLJxXmsX85mpEa+f7n0oDwuu0mCMe+6+6PC9tQ2UOqrtrvn1sahXdsjMXz5y+iqBOnOhj6I1SjFaxaLBaYjMYYHx+jLHcwtcEsVLCUdYnRmCStj4+PI+mTeSBpg6zpdIqzszOMx2M456IgFIuKAYjGN8syzOfzWHq5XC4xGAzwnYcPsd1swdk0ITXqxqIYjmBs+nDwDQxLn0R8MIE2H8cRuxBU4ppphdn0BEdHhzg8PIT3QF3WyHPuVxCUN3UWGm7JzkQHKOL2AUUV6Fa2sMCW9x4ydlD0lyZIus/0n0tQkz5c1yd/ERHPJJGP6MAG3hPRzip1uZrv3/NNANBaRYfLOwcdSLp8P4QQcMZCS0VS5SCjSL0iLJwxGI8OcefOHRweTPH222/j/MVjCBhYZzAYFvjoow/w+OlTfPbt78bhwQGaxmKxWEMEcrEQAk3VpprqqqQUZVXT+TiPp48f45//8j/DdrfB59/5AoWWUsQUiRCBj4QWSRsMBvCQaBJipxBtkzpCRnZJJ2AfSaLssPC+WOeEHZKqrmGch9J5rAqJZN497h8t/uE9Qek1IUnmnyNynWkIAMaaS84F/z0YDCIqyikgqq7jVAMwHAwwmYYy/e0KUbbbC4BbtF+1vvbsoADguNJGAE1d48njx/iu7/oifG8nMW2SGPa9804E3kZosOZ9UIN1DaypYF0NZ2rE9u+SSu6FlmhAnLnOyUoB4Tzg0UFb6GAB3RCIaIsNHBB2lVrjGbBB3/LaqF1Cq2BMy4eER1vFdHkM9gyr7/992bC2zsx+ZKRv5NO17ypk48rNUsmxkKKlpETUi8ZCIBQKuB4i51sJhX3n1v9s/zrT673Ogep/9rr3ea3al77p3jtx6bg3BFZe3dmwzmGx2sAF6HowICRglBcABKw1aOoa69VZ2+ZaCBhDE2tX7mCdwcFshtVmhdF4BIZzp9MpHj9+jJOTE0ynU1xcXFDX1e02LqTL5TKWxXLdPjsRaftp5xzm8znGgwHquiLvXgg8+M5DrFZrQEp4qckRcIAXiiYQELUZVGCAR+0AKamW3VM5lxQKw8EYg0ERb0imNYajEYoiJyb/bgvAI1MKrjEQAEZhzJxz1MPAU57d1OS45VkOJzwaazEctUqdg6CUWgwHMM6iGA5QLhY0AVzbp4TTW/0JxIs5V1pwVMrGpq+lwJNQZlkQNCORJ+okKVA2BhkEhLEQN6jh/vdxs3WD9YLE4XKd0b0zO2gpoSDQ+IrKUBuHgc4xGg2R5wplVaJpdsikwna1RqEz/MD3fz9Obx3jjTdu4atf+x0s5ivs6g2UzDBQYzhvMBjkgHfQUiBTAibwIwaDAUbDIhrVuqoxHg3hnIXMiJPx+NHH+Cf/3T/Exfk5fvynfgpFnkFKFZyEHMZYNNaAuqEKVIZ60EgpoDW3cW8b5ZXlBk1jkGUaeZ4hywponXU4UlxJxFvaQ6lqDKlbSgUphmjqCqamrrbRUXOp8SIjKyVVhzkniJchBaAUrGdytYbSMqZMAERtFUaheMGs6xplXWJTrlHZCvkwh8wkJtMRDg9n2G4W9HwaB61yWOOQFzmMNeh7FikGwb/LgBcopWNfpj/+5jfwYz/6EyiKMZ2/kGisxbSYomlqeEEtTQQcnACcsDDeQzoVAzqhMkhVwLo1nKsghId3JapmAwkHKQP5FEREFzqDVICy5KCgQ4AnfQ7WMCEeR7gCRj0E4jPurI+8qdToaK3AHVRjKiwap5CmDRU43REDUl6cc71xTVElT9/b3+K91STpv9U33um5p4geH29f4CUTTFUKcvVkSDk5T12OhU/GCxRoxHlxhXOTHjOmpZL1t++MpEEll5anwWZKyu7vq38e6c90cw5hLGk2t88jcZiIpMy8lZt5G6+RRkGMZIRAp/JDKQkhMjjjAF8FcR76nrUWT58+w6c/9QmCTMsq6gR473Hnzh08f/6cWj2v13DORVLeeDyOaRKGqdn5yPM8CkPx32w8R6MRvLPQmYaxFs9fnAcRsBHKJhANeSKzV+5suA6eEBZSAlJTQywWtFJKotAqSCX7uKiyhoA1DalEhkZLFh5a6mQcW5TBew+tNNSg1S7wAjHK5M9E5y1MGlbcdI7K2/g+pOJM7FikkyrNV6eVL3xe6fnRJi6dc/q39x6ZMS/NUf77tnnvUTU18qIApECW51TCLMOyJAGpFTJBSqEIxMaqskC450JRxcTX/vAP8aM//qP46S9/BZ/+9Jv44hc/j69//ZtYLteAVxDQ4bkhUbO6rjEoCuxcFTVLGEFgFIFKoXVEHne7EvP5BX7t134FtWnwEz/xE5TCGIxi63oviKciQG3uacq0qT4hgOFwkDTsyyIq4ZzFek2VQUye5dRLmn6LJchStuq5nvRzJpMJFovlpbnXooe9BTKUibqA1GRZHnozUX+XdAHmec9Vc0wQJxS2xHa7wWa7RmNqDIoBjo+PcX7+HOvVGloRQqRVlgSvIjnDdOshFh6A89E0lbstvvPgY7z9uXdgjYXOFbSkVgbxuuIufEQVlG4rhrTWUFkGY6jPFDWapxSS9ACUQEzDKAXIoPMS9C18dDha9OGqtAEjvNHoJ++l94Jfajk33dcvJyn3IRCXXornsi/65/NKK/e6594VyeqjAPsMburcpK+lnwmqJJSOEuwscdlvuETPDl/3OH3jfhW63L+OeOwe7y4VGuyfe+rE7LvGqxCPFMXo/+NnuL+vl22vIerVNThMxuRFTggiq6WTwFquyac8V13XQVOCTvzo6AiPHz8OeeaW2b7b7XB8fIztdhvr/FkzwxgTFRzZk+Pz4YZcSkpIIbFYLrFar7DblXBBz0Fokm8OJwL+TypaYI0xsIlg1Wg0wnQyjZNpNp2i3GxgTEt8Y4cgLeOMk9856Fx3Hlr+yehCarytsyiCpoazDgezGcog0sXIBWtEAIjRZh10SXiysbx0usjz/UsnNyMiffU4ukcsp41LkyxW0zj//3POBhA4KYrg/G1JaqlSk/HY7XYoQnn1Yk4lvPCkpWGtgdIes9EUVVXit/7lv8TxrWP83b/7d/HWJz6LO3fu4gd/4C9hPl/h+bMzfP3r38Biscbjx0+xWW+R5xnm8xXyPI8VPBy550UBZ20s8QYQHBLSr9hsNvgXv/qrePToEX7u534O9994C3XdQAgq0YUX1EEVvjNH+gt7KgyW8nlSjZG0GgVo03supJysd3ANfS8vcljrkGU6Iii8iLMB47RmhObDXE1LawnRyCLPIEVZmFuSdq2O/+oKjWkgpMR4MglNGl9guaRUijEWmc5g3asmDFmnh8Z0t9viW9/6Bt7+3HdRhkYIKEmlv1cv/t1nkte9WsrgcJEujxQaUliKwqWHkIALZHbvXRC5ax0/NsBdWPxyf6erzqW/9RGEfZ9PHYCXwftX7T8dh6ve3/fd6yL9/vlecmT2rHD7DHmH09JDJPZdb98mpM7EPn2RvlRAyrnb50x1UYm
Xoxp0DE6dXP58Ol9epSLlNZyNrg5DWjHB6oNMKEw370lLYL1e4+joAOVuh+l0CpaHZoLY0dERttstqqrCZDLBxcUFBkGavGkanNw6gXGWFvQ86ywiQgiKbkKzo6qqMD+/CP1NGkitkA8KwAs0wZvnPgzU7l1Aek/Ql/MYhqZiBwcHUZWTb1y1a7UiOA/NE4RRgxTu4jHqczNSpy0+POFnXVHZbJHnMI2hvL8xFBFmpCOhhIR1Ji743CyNF1mG2NhB2QcRpkS+fR4v5fV9x7Hhn/yguFdegP+nvznv4QLkT/P6KI7HbrfrECuPjo+itL8QCqPRAHmuUVZbSKlw+84d/O7v/j4WiyV+6EtfQpHngAeWyxVevDjDe3/yHprGYLlcwwuB8XSC4XgEKTUyTaW2Rbj3u1BhwpVKjLZRRYhG09SwzuH3f/9f4+HDh/hP//P/DG/cfzPcTxBaEVCadMFjDlXTNK1eRhLpEK8jj2lMnj8pSZSRDqUZcakCmTaPasEs5d80bb6fxjWNWltHKF38+G9nHYRqiW4suMaLNKMb6/Uaq9UKm/UKTUNN7PK8ALzD7PAI9+7dx9Onz1BXNbwHrLNUzqhubiBlqM6i4fBw3uKjjz/EajXHZHpIWiCCOkBrHVorREJ6u3akzr0PwZpWGiYgQ0JQuwfhSbsFwoXUiAU9si7wC7qIad8w7ovs+0aTt31OyT5Dy5/dZ9j3pQr621Xf6wdp/Qj/qn31Df8+FCOds0IIiITLsg8xuOr6+v/SAoa+o/GyffbfZ3vTdzbSMUuP29/X1ZtAiypePv5Vf1+3vXaLed744U0jZZ9MYj4Pfo9K7cY4mB1EB2Q8HkfxJtbvuH37Ns7OzjCdTgEgqpNudztonaEqa9RB2TLLC2gVemJYi3JXYbVaYbVewxoDqTQyIVHVNQCKqBDr5cPkBADv0DQ1tJIYT6gT5HgyQZ5TikhngyAL3iDPNUZDWuQ3m03HWUiFkngysZZB36CnsCi/prVG3dSEzgQImqt++HM8yViSnIWleLFmcl6fj8HnwRUt/MACl3N8vKUy1OlDkubj+3nW6zYRF9FrPoObZgKBoOiNm5xCQOP/TI4thICDC2Wn1N14MBjAOgetFJx3GE1ISXe3JQdwMBxCaQFnDerGYDyZAsJjudpgPB7h0aOn+CdP/jmc8bDO0fwN0Sg5AgWcAzabdeiKvIEQJcpdGVMCjCJweiWNkqRUGAwKrLc7zGYzPH/+FP/sn/0z/K2/9bdxcnIC68i5pHRPFQWwUgPDi1YqIMe52zRa5lRKuminC63zxAeRMov7HwwGqIY1rDWhTQAvdq6zwMYpmiB2abUMO+xA6+zwd1nxeLvd0jqxIkfDgbotDzBElmkc1BXu3q9w+OFHePLkCWl5OBf5ojfdpNSwjpBdQCLLNFbLBT786EN8z/d+P7ic3vtQoeEdSPSL4Hkp21RnmtqQQiLLcjSNhrPcjE1RHx1B66FxhnghQQ8o7fp7lXHq/506cfz3vs+n7/UdlX3RPI3N1d1e950L/77P6KbnuG9Ly677hrd/rBRh6HM6AETS/svONXVqrnJG+scF2oCuPzZXITP7nJirEKmXoRr0fYBXwVQlG1wIIVtn5LoxT7fXTqOk3jav3O3fnKvqNizz3keFxu12G9tNP3v2DN/93d+NBw8eRAP5+PFjvPHGG7H7JIsp5XmB1WpNXSOLAeA9iXEFcupms8VyuYgiUoCM5MY8yyEUSYPzpqJDQA/wcEJlb9PplM4PiOmhzWZLfA5ksfSW32eDzyJeMcIKCy18O9nZyeBSwFSvhJESISWcM9CCcu+r1Yry24J6fnghsFqukBd5HH/uQ8NiUil8xsaGJ1jau4Ih8P7k5QXcubarbH/RiZP6BhNuMgB+9i8Af+GTwMUG+If/GvjweferUgA/9l3AdAj88teudyCUBH7ks8BPfhEwhj7/je/s/44UwPd8Avjx7wJGBfC73wZ+/32gbNrPCAH88GeBuwfAP/qDlzsvzrkoGY9wT+ehMWBd15FrxIRI4gk0UDoPkus66Eo4DIY5NpsdFvMlMj2AN9QkjRC1HXa7LbJMxbLLqq6wWCzQGIPxaAZjDaq6RlVXsIZKW2cHB9huNtiF+ZBpDSFZQG8UEYo/+ZN38Wu/9i/wV372Z3F0dAKAIOPhaEht24OzmQpzMdeqn44DEI0//86pvRTZc96jaSqoIOhFqQ0DrTUGA0I5ZVUnBFEfAoPuguoS54arT5RSobldN32otY78rs1mQ5VwocUBcSI0tM4hBkVECrxzOLl1G4+fPIXONKqyRCZ1R2Hh5YstkQjDigitBcqywvvvv4t33vkChCwASDgv4CzJovtYMuLimtjZo2AuSgatcpggPiaVBKecnKOWAtbbkCqWcVL3jddVEe9NUQpeZ64bj/RYNzF46b776Ad/L0VrgZYrtM/o7jvuvuNfhyq0f1y+tvRzqUNwk+g/HZc+QnOVQ9h3ZFLSaJ9Xl34+3c++e3X5tZZMTI5G6iDuE1Xbv70WstH3uqRqZbGdJfKbAHnefIIusNq5B8YoqPeVZYnj42M8ffo0KpNyO/mLi4uoxGmtJbno7RZZMYAMi0k4ISyXS1xcXGC320WUoe/1SqVjR1A22ghRaNM0mEzHuHVygqOjI5IbD8qZu21oNx483bJuAh9ERESB2PllHBeWEqcqkkHgeqhI1pvNZlEro08IyrIMHh7KSehQFns4ndHYCAkJGsdbx8fUm8ZaqKxVs0w7xbJoExPkmBPDY5pOVlZpTdEOeg97Jz4vAMaYlhh1xTbMgF/8EWA2Bn73XWAyBP7WjwP/j98G3n/Wfu57PwH8r38W+J1vX48uSAn85DvAz3wv8K/+GMg08L/8ceC/+V3gax9c/u4X3gT+k58Afv89YNcAP/4OMN8B/+7j9jPvvAH8b36WHBb/B9deTtxc4nhzxMwcmNVqBSDwXLxEYyhirxtyuAeDAqPRODidBsVkCEDANB7eEsO9biycFyiGY2SZCs5LjbygOSXqCrUxyBjNAqBySiOWdQULD5XpiEBK5zDIB2isaZEz5/Dbv/3bGAwG+Ks///OQwZhmRYGqbDul9g1ev8yVkQh2nplEyg63EKQGe3h4SKiYorQncbpMTMWRyFeOqirj8fuLKqUGXGff/Qgv7UDrnIv9m7gb8Xq9juuNUAJaUnuBPM9Rlzt4Ry0W3nzrU/jjb38bAGBsg7zI4gS70rAmP2OpqCAb1TQVhBR49OhjLFdzHBzdAZBwEMRlI9hHIjhggVeU8jI1vLfIdA7jDUxTU5BlDbzgMmxceobTbZ+RTfkHaVp437n195NyCVJk9Kpo/yrD/zIEpYNo9cYqPe6+ffXn1lXIQBzvsPVT5OkY7TvP9Pr4vPi19Pz7zlO69UUa+7/3j9V3zPhYffRkf1onQQ97nlXI9MfvaH0zqO+1nI2XbUIkIjCgEwvDTERJreEsPfzj8TiiHMy/OD09xcOHD3F6eorNZhO5CMvVEpPpASrjYKxFvdths15jtV6j3O0iYqGkDMgAq3vSv1
aERSFTEkJ46FyjqRvcv3cXWaZxfHwEANjtthgMitBe2yDLcjhHUuFFkYXFrpsnZLSA+SVchqu1RlPX0IWOERbQpiK4syvJaIfW5MZiNBgS4hHUVQd5AeuoRDbLMzR1Q7l9ANa30vFSyg7jns8vPW66SAOtOmj6EHHTN25/zt9LPWfeXubdjogqg//bbwJnK/r7P/8y8M6brbNxMiGHwdyA/nFrAvzl7wH+we8AX/uQ5ph1wE99AfjWQ2BXt5/NFPBXfwD4g/eBf/o1Kuv6nW8DlWk/Mx0Cf+fHCQG5oaMekTquACqDc2kSufQsI6JiXTeom7ptmAcyRJvtFk1jQvqLZLfhBPJ8CID6EDFSZp2htuxqgKou4Z1HMRxBqwwqlIHrpgG8Dx1RK2itsC1JZn04GAAOaJoau7qEEBJFcNyd8/j1X/813Ll7Fz/6oz8K51vHN5XX53Jznu+pUad53EagnLrj9KgQVCG2Xq8BIVAMctSWHDBG+lzoPcR9aRiVlELB+csOL3zb8ThFVNi4ppyoqqL06na7jf15+HkVqq2sGQ3GGA5H8BCYGYO7d+/j+PgEz549gcozGGehXjWXQicLdkMEBFbrJT766EN86fYbMIbWKNKg4HLCLsLgnEt6/QRDG6TLCYUJnai9p3HzhJp5tLLZvcf2Ujqhb5T3ORjpmgdc7cDse61vKF/G/7jq76si876R7iMi+7Q9+sFe/7js0KQRDJfe3jSq729XoSc3+d4+xGXfPtI0fv+Y/Po+p4qdDb8ngEznA9+/tEDjuu3PxdngLb0gvghjDObzOU6OT3B4eIiyLDGbzXB+fo7xeIzj42NcXFzg3r17IW2SR9nxyXiCi4sFdnWN9ZqarxGfoVv21HZPDPyA4O04xyJc5AxZa2DhcefOKYoiw+mtW7GsUElqV039XkgpsSwNnCUnwxoD7sUBdB+4tBU7G+aiGMSqFUZfGNXoG31GSxgVMcbg4OAgpmm0ovRLHj5DJYSSOsWGCZbmyxkdYticHYd+miWdnKy2ymOaTip2TF4Gm6bb2Rr4v/56a8hPJsCtKfA/fJ3+VhL4n/0w8HQBPFm8nDMxG5HD8ugiLOEeeHgO/MjngEHWdTYGOaVG1iXwv/s5QEjgl78K/NuAaigB/Edfovd/70/o+zfZvHNRbI7vV1olxQRo6wlA53vMPUiICF2jachB5NbtVVlFVJDTMR4Ijkce7gPtL8+Lzr2JxjM4nDJUUzVNg11ZItMadW2wKVv12bwYxPvyT//pP4GUEl/44heh8xxKtkrB7Fh57+O8THlB3KacHSqucEolzZVSVNruHepGE+qiJExDSMtkMoYxLiAx9NxXVQ3nBSDaRY6e7e4zk0Zq9Jl2nqapE37++DnJ8gxSCYzHI2idYTY9oLJkUDfP23fu4v79N/DkySPkRYamqqGy16y94s6x0qOuS7z3/rv4wS/9KITQwYEI3BeaYfT/NEqXeyJuGXrDuDoJCizxfBD26SScsBBufzkjHyc1SKlhTg3T5Sj4auPfRxWui8qvMrr7jtPnEO0d6p5B3udo9M+5nzbq7qP9Tp9rcpXBT9OM6fvsqO97b991sH27Cl3qj0+KSPHr/fPsj2v3PBy8p9YR6aYUyep7R5WlAhFJeOn25+psAC2Zy3sSmOFBy7IMZ2dnODg4iPoadR3azocSV25hz7LIH370EZTOcXaxiLXLqZfvPS88PoHVAuYjQjQvRCj/cvDO4xOf+gSyTCFTGrvNhoxDYzAdTyiFstshU1TV4YyBYeNsHYTSHSeDb15a98yL867cIUuUQtOumamwFoBIyNNaY7PZ7G3jzpUmUkoIJ6jsTSJRehQx+mSODPen2OcZp5wMAJF42jQNAHnpoek/HDdx8PkzQgB/4y8Cjy+A95/Saz/6OeBLnwH+y38A/PwPAvolGuSDnByKFJ1Y7mjfec9ZKDQ5J2+dEK9jVAA/9/20j999F/jS24SI/Jf/APipL5KzIW6AcHggGq40yjdNg9F4HHkai8Uidqaledc24DINGUOlNZRa0fzwDlnWCsqt1gsAojNHJDvUziPLddSXGY1GGCQdlwFgMplEbsJwMETldlRWHe4FSesTkvDw0Xfw9//+38P3/YXvxw/+0A/hE299CoeHR9Fp5XvPCIf3Pr5O5ao2jkfaQ4VVO7n78mwyhVBAHcrki0GRcJjoGckyHTszW0ORQwvxdhfNvsNBFSd1RPe22y22222naSSfm5QyVuAMB0NMJlNkipzHzXaD8XiC+2+8iW9889/Ce9M6DDfeBDqJFUGLtJIez54+wcX8HNPpKeAlrHGQChDCdxyqfXC8EALwDkppZDpHY8puIMBN5DrWoLvfPqTOW58Qnm774Pl97++PmK/e0oCnbyzTrb8Gpcb3VQKgdH/7EJL0n/ddeXXRS3VddW39lFF6bvvGcd9++s5G/5j980idiH0Oxb7fuz8B7wWE7KJr9GYAEAQCgmZwxaVf2v7cnI30xAFSXJuMR6gqKgmsqwonJ7dwcXGO2cFBhFmzjMhn9+7dw3w+B4TA8+fP8ez5c+RZjov5Ek50vcI0N8h/x8XfujB6ZDw4wpyMJ3jzzfvItMbBZIz54gJ13aAxFUbjEYwlcloxyKC0gPcCOpN0A7xDlmsYi9bgJ+fCCxmnUDgC5AoQRjfS5lAAOpEi64yMRqMoT85ORloiaIyB0gp1gLaLoqCGTGHRB8hx4EqWtLtmOtm56iX12DmN4n2CGvUWp/ZBvdnDPS6IF5Fr4L/6F0DVAG+eAH/jR4i79l/8ZeC77lM6Y1cD/89/RYhDf6sbSo9kqrtv54HGdD87yOlz//0fAb/xTdq3VsBPfwH4zhkhKtYB/9mXgc/eJWfjv/gK8N/8DjDfXH891jvYkk8wQGnOozEGi9UKZbnDdrMh8bjOgpZEBILSYLamFIiQHraqYtdNY0gDI33wpVCQMjwHZYtSMamUU4/suLKDYOoawlOppdYa8JQy5L5Cw/EYD77zAM9fvMDX/ugP8T1f/D58/vPfDe89vvd7vweTyRRas0hXlyjODj8729yKXggR9W9c6IlUFAW8oGoJL0iMTwqJsqlgbZuW4bYEm82us6jFxRTsWJhOWa6zBmW5Q1mWseQ4kkHDWHBgMBoNMRwNoXONyXSK0WgM7z3GkymV4m+WeOsTb+H45Bgvnj+Dkq/bjcfHfx4k5nZ+foYHDz7G5z9/BKWymPbgqbIPGaBrFxRcOEJydZbBNgqNBRpjO307pAApffn2u/GMEtQkHdvUGPX5BXsNeTK/O+/2naTwuT6BMXVsUgO/b0vPLy37TMt5r/redfvj89n3ee+JAnCT76XbVc5G+u8qxyndx750V/8c+O996FL3M2hRf1wWEaP3gqPRc6yda21EWpFyk+21nY30Yi95smFS0cmElyCwLWtIIWGtQFYMUdUGw+EEZWUghEaW51httphOZ/j4O4/gPXB+QaqfeZZjU1aUSwZ58ynbPPX8eKGjhYUWP9sY4oM7UvJ88603kAWOxXq9hpIa00lB3Vath7NUZlhVNXbbM
vS8kGg4EpUa1jVRw4A3bg43GAxiB1puCy8doiFgsiaXtPLfxpgOksGLZ1mWUS2UG9Nxd1xYGt9cKTjvMRwMsNqsg+GSEcqWwaFgAi3fM+9bEp1zbdkuX1eW6c5nychIEBfYxzTWy7ZBBvxvfw4oMuD/+I/aSpCqAf7xHwCHY5o6RxNKbXz47Gr+xnxDjsWtGfB8SVP+7iGw2ABl3f3stgJerIBNFeZuOKZWgHHAP/8acDAm4tNkAEwHhLjUPaelvwkEGWrXQEQtBQkvLJEb6ypYA0BIRetup2+3TKxK6DYccsGO2mkEXQT6CM9va6mxl7FN4DmFQfKehN+EiHMsRcKyPEeeaUxGI9hgzLMsw3qzxvxigfF4An/2HOPxGINBhsePHuDDD9/H3d+/h6Io8Cd/8i189rOfxWc+/RlMZ9OgUhqIhyBuj1IKOstR7qhnitIamSay7GAwhjE7jMczKKWx2i6gdAapqK9KYypkmUaWaWy2WwjhkWUCSnlI5UEt0kndlEpEacjI4aCUC6VqJKypsdtto4AX9XCpooMvJTkaw+EQw+GAkJUswyCnVJY1lloPDIfI8wKHR8c4uXUbT548jpwzujdXGLb0p0BUK/agwMUJQYUhzuHDD97D5z/3RThXgQS6RFA07lYnoHM8NgY0r4RSkDqDbYCGLYmQAEuPC5BOROcx9b1nupsqgBBBYZnmNqQg+XQwyhQ4TmFfQnJ/nfbaWaEZUlCuFIB39H0PESu+6HbyddOzQc9UYlN893v0/IWyVLTf9yGyfNmalDpTbHhTxKJdI6+6v12kI303XnZCIuV98jPcOn4i+f5VyEZ7jDS44/faecEoGjt0XcIn/WM+EzkVUvrQGM+F3QaeCqhKS4ZiCAiSg9ealH8RrkWrP0fOxj4vLfwRX3PCx7wzTwbnqQ+C9UDdOGRaozHkQBSDAelfKIVnZ2dYrdbY7bZhkitUgSMhlIbq3cBURCs9R6UkqKWjQZ4prNYrvHn/DRwfHaJpahwf3saLFy+QK5YyzpFlMkDeIop4ZaHzJXwN7wSU1AAEvAOGY+rdwk4PQPA1w9qUhkDkiPA+uRzSGBNLJDkXzg4Glwkz0ZTFw6bTaXQ0uFNuVVUYFAXqusIwL2DqBqPhELsgmjQYDABJEaaQMuo3CCE6zloaLXAUaG1a3cPOhQ4VBEw0vX7O5Br43/814P4x8Eu/SWiGscTleL4k7gYvzCcTcgR+45u4RGjj7WwN/Ms/Bv7jvwT8t79HjsxPvgP806+SA/FT7xBK8RvfJMfhf/gG8PM/QLZ+VNBn//s/Ap7OgSfz9tjjghyYX//G1ceOc0xKzCYTLBaL0GwL8Gk7urgDWijDI48weelkRPI3fQnOMnwp4nI2GI5w//59GGPw8OHDmMYAEoefo8awL2stCUYBqJsGddOg0gp1SC9475EXOZq6hrEWZmWQaQ1ra1Q1kUoPZoewpkTla/zWb/4afudf/SZunZzi/htv4PT0Nu7fu4/T09vI8wLGe0ynhyFCp/PQwehl+QDvvf8+Pv3pT2OxXEFJBS8JxfGeHCNjbSjdpMXy/03evzXblmTnYdiXmfO2rnvt2zmnqrqqGkA3GiBAEGKQgiRLlB8shSkLphmW7FA49KDwL7L97AhHyA5RoTBN0qGwxBeLussiAQIkmhABdHd1VZ3bPmdf1n1eMtMPI7+cOedeu24EIhzW7D519ll7rbnmzJmZ4xvf+MYYJtMolEE1yeG8w+FYw1sVy8HL8KkwR4U53O+30Ea80LZtYpYL6V5riSANjJGaF9wwJ+VE2gYoDZNr2E7WqslylNUEz198iB//+A+eDC/wWYwP6d8WDKk3QZEhTfCgDT7//OfY79eYz8+CDiwLxrzXVEWvXdOIBE84y9C1DtAZdFHA1xlakNFQknRL+4PUKKbXP+wnEgkILgqdtKH3HirN/Aivg6GZE7ZBKSVgSMu9s4iZivM7CZEp6gSCAU7B1YnvhEqAGKgbArwfZnWMn1nKnnhvQWCnQpO/dF3FcRlSNnEMxywDAQv31GFGTM8KnGIrxiH54ev9HtwDRYR77vcPEfRL6fqIO4NDgAAolGJ9Ig/vOngvNool8TMD5DpUCTZZaF0QshYDYPdAKDD5zURu3wlspEa9r2jXb3Z8FDxUmPTcIJ2VNMHDfo/pdArSmj/77DPM53N8+fJLVIlo7ZtSNeM4LD9p2w4WHX74gx9AK1HEl3kRK5cypT3Lsqht4IPkeXifsbNmUk+Dk4KCOHZkZTv72Wwm2g/VswwMgRRFgc1mE8/rvY9alaqqogaD7EeWZbGqalmW0WhUZRlTEbfbLRT6BnKszgpP71ghC1UaU/qRwCO9l9SopZQfwz/8/q/zIv65XwjMBYB/51+W13ZH4O/+HvBf/BP0c8YD/80fCbPxVcbeOgEWiwr43/5LwnL85z+WLBOtw0KC/Ow88J/8rnzfv/mXZIr+nb8v2SmpnwgvqbHT8uuBBiBjOV8sRPBo7WD+x7+/AePz6FCJVxueS1mWuLi4QNM0eP/+vbB3TTOgy8dr8NTB1Fm+v+vauNHTMLddi2MtKbz1sYa1DmVZYrU6R9sa/Pzzz/D6zWtkJkdRlCFzpMNmt8N0tsDl1RV+6Zd+gE8++RhnZysoaOx2e/yDf/C7uLt7j88++wy//du/jc1mjdX5EmVZ9op25ft0cefQtgrz+RJK7XGs28SIaCjDUugWzluZ15mGyUSXxUqhaUE7snhFUcQ016IoBj1leC38uSgLVNUEn3zyKa6fPcerLz5Hbr69QFT5BGwCMEqjaTs8PNzjs5/+BL/5F/8SmobhKT5KH7zQHnBImFjEuB7SwwbOQDsjZQhUKLLnpWYH54MK//u6Q2thFaAMuAxSIzhOM+1DF1rK3Ku0f0bfzMuTIZH22QmU7nd4zW+kt/6V09nBJYXfopcPACNtGveoFHCkYfcUHKSfS/c17ZNnroYl/MchkXHWxvh84+9JD14jx/Wp96XvYVhwnAreX6+HD89DBxCRznfaljjnM4Vca2SZgQnO+PjaeW3jlPinju9YZ8OfQH+PN7vBgwqIk/Fc5xyK0Pr9Yb3Gdr+DtRZ3d3exJTwwpCn78z2effx+GUgTgYd30pXygw8+QGYMZrMZFKSc9HQywWF/wGw6BbzHbrtFO8rNZ/0Ehh7SPgsuoEuCgZTmZslmPnzvPKCTwmfoU10JNPja8XiMGSS8Dt5f0zSxJ8rhcACAGI5Jixt5eJRVBbRNzG7xKvSoUH31R1LtadiEWS+cRE+VBE7v9+vAxt//E+CffCEMR5xHXgDA+PiDnz9+7dRhHfAf/tfAagpYDzzsw3kt8P/+AwB/MHzv3/sD4Hd+Cljbh1TGxx9++c2+G5C52dQSszEhhCWgpQca43E5FVM9ceIBSPHe47Df4+XLlygDqCRrkdK0gzX4VQc9UEQyPp4jBZ9aa+nS3NkIOKtqGrUgXddi/W4dCpOJTkXdvsfDwy3u7t7hH/+j3xOj4xzKosLt+zu8ffMSZ2dn+L1/+A9g4fCX//m/LIXpABgvzdyo
FWKKLcFMlmXoWhv3BgUFp6TYmPaAMTqmETulowaKokkAMStmMplEwC6brjAdWofoAzR02IDz0PH24uISV5fXePn5N5yg6ZCj37UUwj+0h1YeXdvi8y9+jl/79T8PKC4QP3imp/QINOJGG3hIun+e5zDahPCjAI00evLUOuV5YzxeaYDt0+ElvBNOKcym/CxhEGF5vRc2RYceO145OB/2fq/CmwML4gFgWK+JYIHXk4Ktp66Z3ccBFRp/PhZBynn4mk72MT4Ik7z3sQYi2jiXvj4EEamuZXye1DiPx/vU/jB+z1NHes5UQpBmZQ2uN4DUyWSC6XQaxeS0P7QDWitkSsFoKRJnEgaG4cv+/v9Me6MAEg96nGd9arNL3xNjx0Ek2bUtHh4esNluZUEoFTfSx4P11Zs0JxDBgIQiDBw0Pv30U2nBXVXY7/dACHPc393jk48/xvv371FkeVTcU1AJID6IFDGyhoUyBh5t/D1RIeuGUI8hgCKHsj0SlAyAKgrnzs7OopCNYZX0etJUWOo5uIlygrVti/l8jvV6jWo6iZu1hLGEwpNCQIBFHz5JN2MCG6ZLpuCQz4ETOy2f/nWHdZIt8qd9OA/cfo2Ik4cHsN7/6X239x77/R7GmNjB2LlQadP1JaZdAgr4uccx+EcnF0CgpYplF1gmtk1XSsWiccCJNfjEIVQ14vVhDJDkTfHHw2EvRsU53N93mExqGHMZmbfpbBoAQAdoH2jyBu/fvZG6I00L74H5fIm2bbHZ3mE+W+L165f43icf46OPP8L3zPcGWRQpA1GWVQzjVVWFtunQtvT+pfy6pOO5UK20xn7vURYiuk5DhQAGHtyQpSuQ5xmyzEi3WGXQdZJSWoQ+NWVZ4YMPPsIf/9H/gK7+lpPZj3/28NaFgogKr159iZubN3jxwSfoQgt3GtvIDng8NpSBjkcngCLPc2lo1+4DoOyDFU+RGqcNdB/iIOBgVMU7Owz/BR2S5jVSMxFCOJrANs57hi5GmRJReRGuNTAbp5zL/tr7P/1nhywFcMppfVz0Kv25d16Tqps2eR+GY8bvGAONFKykf3iM1+wA3Jx4/atYFM5l7jcEELLmPTJtUBSiQ5rNZphOpzHcnzZV1FpDw0MrOrkAK4VmSsM6G9eqc11cW193fKdGbKkieTwwpzY7DgprPLAb5W67hfceeVFI98cROhxPBkU4feLQXBxeqpgqAIv5HLNJhUloZrZer3Fxfo7D/gAFh+eh/0qZFwPvPPXwyFhw8yM4AGRhN20zADrW2liuOk3/M0Z6FlB7QWPB0Aerj1ZVFTNV0lAGa3fwu7TWmM1muL+/j2NLWsxai+PhIOPKmH3TIAu/R1jDaTrruPZGqi1h0TECG27eUj+kfJSS9T+WQ+byPmQ0TFGWZezt07lh0TTOAx58jicXKkGAc1IzRkm9h4eHB7Akt7VWar34rwD9J44UWCitA7Xaf2e4uMF88N6FCpU6snxta7FYLLBcnknYEA718QCnHNpOgLqsKY3pZIbJpMB8PkVdH3F5tcLr169x93CLH/zoB/jgg+cwpo/7CmtiY1v3pm5R5CVmM6CuWzhXwzmEP2yNIOJRYxS6TiPTw15E6WbKDZbzVlrRq0c9T5RSyEyOPC+htcSuP/zoI1xfXePVl9+c3WDoQnkNFwt2SdaJVgrOW7y/ucHrV1/i44+/j7aT3iaPwpPJjxGEhOt0wRjkeY6iKHDUUnJeKR9aTQ6vZXyf44MhjFOvp/t0fy0I4Zth6mlqeKORDO9VnjLYAFQgOtLw5iEddOJQ8sGRcQ/fmRjgU2vi1D0/9Rr3tsEWd4K9OBXySNdmykyNs1RSAPHUtfD19D7SHly0M3Tm007sSgNGiY6RrDn7KHFt8HNKKTBIx+fNqsDe90X0AGaCtacu9dHxrcEGPQwWJhp4xmQ1RoOeTjZmVhwOh9jrwzmpdsf0S9L3pzZMpTSUd+hCISSpYwE426EM7Z93+wN+6Zd+EbPZDFqLBuLh/gGL2Ry7zRZVWWExn+Pm7Vs8u36G7WYTvXStNabTaaw4CPTt25lZwroCkpKoYsro4XCI9QTGsbTOWmjf94Yh08MNnWiUbELaUwLow08pgFFKYbFYDPrHUDOitEZrO+Sl1Hqw1oaMmlAVEhhM0BR48OD3jIFIGvKJIOtrKL//fz2Y+kzgxfFhwTlmPKSLFeg3baXUMKVutMnwd16pWK+DoSvrXG9sko3s0Ub16Nn0MeFT38nPMAQi75G3WetC00SD7RYxe0o+4lBNi8DKecBbHA8t1vf3qI97KGi8f/8eu+1Gsk+sxZ/8yR/jt37rLwNwmM9nOB5rlEWJtpXGc7KRKVTVBHXbIs8y1KoBveIYXoAL71fIC4POdnC2X4Nc26SNuY4kXi19S5zr4LWBc13suSKN66pQKM1gNp3j+fMX+PLzn8W+SmQg0yyv9FAuePsKA7NPtsAooOsafP75Z/jnf+tfCuuwb3BZFNng2Y7/eC9rsW0cjAkC9byAdw1YUTTOOf94nY898uiAeIc0e4qKDxsygpIzBN1h1zMb/IQS/Qy8pHd7H9pZJGwE4OMUTWl5ec8QsKfaAWER2xjGE8Zdrtdo07MiBG4UupJB4fgNlt8QQKXgyieaDYXH6+wUsMKJ942f4/iz4/fFUX6CiRkDEAKNIvQdkmsh8EasZ8Xn3mdo9UXDjNFBN+eCzlCKxLGWTq8hklDWNzm+A9gAiuDNphuoCsyCGoGNMZr0pNeCx0Z6LWUPTiHEMNywXQflLKqy75kCsI2zQ1kW+P6nn+B4rDGdTKCUx363wy/94i/i5ZdfYrVaochy3Nzc4OL8Avf39/DOSRt3a6PQjzoHghCGJCjodM7BK4UszyL7APS1MviQWXWxqip0TR2vOTX0fI3GPGUx0vANKfoUjKW9TwBEgCLxfBVT/z766CNsd7uQEizfT20MNSLps+zRvH7Ua4aolsI6dgf9HyPc4HNh0SjOlbIscXl5ielUGp6xgiWBIQDAe2gjTfUk28k/BgajNXA89iIX8egBnWxsqTc3pmxPnvOpn/tvSb6vz4JwtkPTiH5LmDrROxyOG9igCwIU6oPMP3EOZGzu3r9HXhTw2uAP/uAf4R//41/Hr//6b6Bt25DSfUDbdiFEkkHrHN6rmOWVZU0ABMMuxakH1rYdTMJicDPluNFRERBiAh0M5HkQVSoPbUQDIWnsM5S7HapygqurK0wnUxwO+0cZBScGux/DxKsXVgOQlGfA2w5vXn+Jt2/fYjq/hFK9Tot7w+lnqiAdcXUQREtmjTY6lHcPmTv0nN1pI5mOTdTsmMQYJnOBLARoxDj3Ak2hEvDgHeCtGHqtZGyVkZ+hPIyWLBWT6dD0L0NMoc7ymC0E8DUDZl/wOpumlcrPbYOmbtF2NgIMP7o/uZcx03L6GT5ibkY73NcxJeP3jN93iv35NudJwycpg5Oy9L2tcdAKaNve0UidyDHTIg1HfQQT0kbAgY0jZZ74eO5vcnwnzYbRBrpQUSfAC4yiztHAaa2hfILmfA9KfDKZxxM+PUf8bqO
hjdQYAFJdh0eWazx/fo2izPH8xTOJoVtJwf3ss8/w6cef4N3NDY7+gElZYbvZBPZDY7fbQQFo2w4efoCiU4PPngqTyQQ6kyZIm80mGm96q6wsSaV927TIwjkBRIakLEscj8cYAuHnUzDH+yTYIDWWKu1TcNC2raQqeWk2N5lMsNvtI7jJjEHj+74p494s6ZEuglR1zGshhda27Z99Odr/Xzu8hDo659AFQ2itxXw+x9nZGc7OzuKYcp6ltOSYjXiknfiaYywOZYwW6EE4f/+ndkhKhXxHANZty3lh+zQ6Lamd1sqbm7qGQhdrWJSlgso0Dsc9/pP/9P+F+XyOH/7wRzHEKtqNAASshXUWWZaHBnYdvK9hbc9w9PitN0JFAArciMkypmI47mFQLZTKw/l0ACB9pVYBJjmyPMfZ2Qrn5+fYbjeD85w2FGKBI4+hXAgbeEDroH0DlPLYrB/w889+hh/9uQsAvQPGNW+tg9U2fKcAv2hCA5CROieyElUAfGmpAOVS3cfpcHcErsDJMEY01hyrwCCwEBS/FyqkU5oshKoYtspgQmHnNPzLPXesIVCKIDptuCbGztoOdd1Ep6puatRNi7pxUS+XhtM4Duk9f5fj1Ir6KsAydrzHTPJTzMb4etP3pgCar/O8LHDXZ8QAJjjkKSg/9d2nrjmdL991P/nOAlFAJgCFkKnXGymrdCB5I+gVx/zhEX9BxHli8KX4iI0KcgSqdTqb4YMXL7BcLlEUBd68eQOTGcxnc3RtixfXz/D5z3+OPM+xOlvh/c0NVqsVdrsdDBSmVYVj2wBaow359cfQpr6ua1xdXcF7jy+++EIKaQHY7/c4C54T41/czBhqIcrMjIG3PjIT6UbH1wgemPbHcU2FO2n8kZMprTNCZqjtWjh4mEDpzxcLbLYbCQV1Hbqg7mfMjR74GHmPhaFj8EHmxjY7mOY+zA01MJqpEfXj501AqoS2NaEqJgIYTT06R8//q2K5KmyG6dxKPP3BXArK+G+yeE55ObZ5QBEAZuodnJ+f4/z8PH4nm4/RS6YWKAVr4/E4BTq+ipbl6+PNiwzctwExyTeefln5IFxFKL1OxoOMJABIcSqlJBuBX5/nvSBtulhgt93i9vYWf/Nv/k38O//O/w7n55cwOg8gvUBTC0uR5fJ9eV5gMrGi17AeLWS/6cWTvfaLVDLXCjCsxsjXuw7Icgf2nNG636NEGxMqrAlMwGw6x+XlJb788ouvABnjoVR4VObcSehHKQedGTTNEV988Tl++Ct/AVnWC+XpOdKjHMxXsjoJyxOraipAY3i/ZATG7PE4lCJGykOFZ8tbVGoY/jWmZyM8ugAIdEyXpEYgz4uQ8WPkZyMgRScAw2hJnRVGK9Th8BL6idqDZPdw3qFrpTmhc1VwgCyapsOxkRL1h8MBdV2HsVSQcvoSouM8tN23Xxv6ibE7dZxiLb4L4EnPk+rkTjouo/MrDXTJ+kjnUarB/LrrPvX7b3p8J7CRXhgNXJqVMG6Tm4INmhulks6wYWF8k01fa43ClNBaScMqrfHixQvMZjNMJhMoJf0Xrq+vJS7lHKqyxKtXrzCbzWA7i65tcXV1hePxiOlkAm8TTxP9gp3NZqjrGvNQuCnLMpydnSUAoa/LcXFxAaUUbm9vo0Fh2IUAIA8hiTHYSDvepiEbXgffR6MmTMUuTpi0sys3lrIsUQeDNp1O0YT7OBwklbEodAQrHLdUowEMWxDz36R1AURmpq5rFD//92F//n9F11loJVUtPaQyXd3USeyvTyEmXWeMxmI+x+F4jM+N8caiKNC1LeqmwevXr6VeSSadSPmsuCkRaGR5hul0Cm0yTKcz5FUZO362nRS3AhCN/W6/R13XQR8RUuXCfFOAVMHMc8B7tG0H62ycz87KM7u4uMCzZ88Gz4DsFEvxs9Eav5chOWqBguV5GkslngXXDZ9PBGSJ/iM1Hn+q7EY4N8MBSDa5tMBQ/509CS+evDReu7u7xfL8HOv1GuuHLf7G3/gb+O3f/l/ixfOPYK1H13nUocxsWc7Qdi3yPIP3E3SdR9dKzPi0QG04HinLOgbNHEqlfLwvoY/7uDTLwxutMZlO8OKDD/Czn/0U6/U67n1Pbr5exkDx+SlAeenMK+Zcxs174Msvv8Dt7S2ePfsIXdcG1lEl54lPIO6h1kmXV2stnGd9oaeN33g80p/53Dx8ZKDTWL5SKmp0yFIURYEs19A6AJDMhEJQGTKjkReFdCY2ZDZyGN07Gf1zGP5MNqNnVcfetQJKH/buXnDdtPKH+zfBBrv9EjAJ8PAAzEnh7FcdIkAdGfRTzvHIaT4VOvm2azNd06lYk+cai/WVkmgCQhfgU4kYp64h8fmevDc5zze77u+cjcKLZEwo9a6ZNsqNMw6qc4PXBHEFPUKCuIH05mRSyUOTG+NEWSwWuLi4wHQ6BZuWsUvm4XCQHHptIhjw3gO5PPx3797hxYsXMRvl7OwMdw/32O530RPmxCxDAaxdaNRGUeBsNoV1TLlroriV5cpT0dhsOkV7bGASgMD7JCAYG3oWHaKgJxWRclKRMkuLjUn8XwUvIoupulI6ug/pAMPugzw/r2P8LMbxPaZeSm5/ACRKqE1jtGyAroPRIffednA2CCptuPfWYjabYruVwmabtWRcTCcV2qaGs+KhLJdLwEshoK5r4rhysaWA19oWCh7GSCv3qfLQSgo+1Y2FVsDd/X2g2IHz8xW6rsO7m5vgPabTXcHZDirPUE0qTKZA23Rg/44sm+Ls7AzPnj0bxD1TVX5Kb/Jnpss653A8HnE8HCJz4wert+eCtBnWA0iNQ9exWmgvGiQFDXDXgATRn2QsvgOlzDkSsgLk3wFw+FAlk6dHDzbazsLC4uHhDkppzGdL/PQnf4L/8j//z/HX//q/BW89atfAeyAzGaQng4LJxPhmmQnZIw7e22TDFPbBOwQquW8Kl4Xy4/SS+8/4mL2gg5jROdFvRV0ayJwAWV7g8uoay9UKD5t16H7JdfKIu0ueYzLUITQj5R9E+6+0wfruFm/fvsH19XM4b2GyiWjJvJf7HYDZHvB572BdaPLnCFrVgLnzzg+ubOwhD7QtCsi0gjGA1iYWd9JaifiUwD4LTEWukWXiQUdwEkqOK8Xy1vIZYTEyGDwGGmOPPBUuDgB1BCZasm1MH2rOC4eic6i6KsyBBm3bYTqRcgBt16I+1lLOvm7RWf/o+XzdwQqgTx1j48z5lr4+BiFPsfnj86T7CB0XPkuOT+r08NxaMY21LxqndR+iSr5ltJ4Q/z71nJz7ZmP3HcCGh4MPtHcvNjFaw4dByPMcrXVwStLmFMR7lZhe0AIrDZVlMFqha1s4L2lyeS66hc7aKCQtqwq77RZFWUJ5h7PFHEVR4MXz55EZONZHTEIb9+YgbesPhwOgHZaLBd7e3ODF8+d49/4d6mON6+trvHz1Cs+ePUPXdWi6Fg5StKQM1CuAQaiBIIY9T47HGnVTYzKZxPoarE7ovdRgWK1WaJoG+8MBhclgnWwc9OwBoGs7bHZbYSCOvZd2bGq0wT
ibPINvW0xmU9jOQmcGXRB6FpWAoWMtPVzqVsDe9fU1DocD2mONaVWha1spy1wUyLM8Vk6t6xomy9BZh1wbuM5COY8ibMpO9cJXikq5MGjgqTsRYWuDrmuCV6bRdSIsAiQ10doOWnOSahwOdQgVORSFQZYVsNbDe4X9/oDJZAZA48WLD/D+/Tvs93tst1sAiGCLhc3obVnnpJfDfo3JbhIb20VDX5UwRrIcCCbrwy5kWkhA2XZSVtk5j84Dk9CYKzMZ2qaGQv8cCSDTVLQ0+6Su67hBkPnjXFFKoQ7CTwHfYY3YEJc3dC8cqmoCKIW2bQAlPXqcMjDKQqVxaefQUqWvxJB55wHXAd6etoePjqfjVZFB0SMjqwy3CWEIvIhgNRQ62yHTQOdCbRANuLbBdDJFkWkYePyD//6/wy988n38xb/4l5DrHE0r5fW7poYuFIyRvjFlmWM2qwRowMXaM4BC21hoGLSNxVE1KMoSk0mJoqgGQmjWCKiqChkKZCihvUGmDQyAxjfwkLXWdh32hxoWGnk1wfziCs8++h5+/uUXgFZo60OYRxy3MAwKcNoHVqPfvH1QWygPUV4o0XW0rsXnn/0xfuVXfxkm0zi2BxhToHUdnFIw3sJ6B4Mg2PMtnBJQX+Q54IGuaQHnw3lFLO5DB1iN3mBRL6F1L4Rl+mNmDPJcITfo0yHD65zjciMC1EymQQHJI0BjTAxnAkkIgs0F47+lFLZ3LmZBKR0qxWolDQQhlTx9YnuUUeE0Uk116nN4F9gpKzVgrJOQSxdaRtTHGofjAU3bYXc8oj7WONY1XGCb4HvjKusZyHuFzMDoyzAMCx6mn02dj0cr7CsYg/Hvx47GOHPzVGgsdRi7mNlGp0hAJXOLxB8JmUWBzfAxhBbGmM6D97HN/DclZr4TsxFvcISgtJIGQ0AfIvFKxU3H6JDxnUwUADJRrQuqV7nJzBgUhdDl64d7nJ2dQWuNq/MVJkUBHzIAKMp01qEqpbw36TPnHMq8wMPDAy4vLvD69WsYY2I/kfl8HtNbvZcKorP5LL5GHQUzAGhAaNSsd9HYUsSVgg0+ZCJzay3y0IiNE4HCKKVUFI3SIDJzgeXP82AweV0AYlplmqVCY7fdbiPz4JyTDT+gYHb5JIPivXTjPB6PIVXNx4wY2wmTwuyc9Ht479TuEGVPp9NYhp2FxmLcdUTfpRtH2qUTQExDNkbap2t9jZcvX0bgR/YpZYXquoYKTaO8UthutzHc1zQNlstlFKUBiLUxqqrCbrcbqLm10XAQQHP/8ADnpWV7bjRyhlcwbMnNn7uui0zTdrvFbreLoIOeNsfc0dUIh/ceOhPaPi+KOAe41haLOQBgtzsATsGppNKi94HMCHH3uBa91Hn4hqlqXO/f/l1q8IL3HipkGCjS5zpU64RD2xxx2GoURQmlNP7O3/pbmJQlfuu3/kWUbQ7nZI9obAsb9C4Sx88wmVSgloG3Lpu7gFWh7A20poDXJbRzoP21QZEVKIsSmc4gzdBooIcbt4KCMgXKyRQffe9j/OynP8X97U0szjUGZ57/Heg1ZI9MY3X9vmhxc/MKh/0as8VKugHDQqtc2F/vgz5DvHtRzITU3xBm8GGtQ2kwDVWYCEKcoVHiLDXwCgABAABJREFU2ijCPBNWNkeeAUWWCsOTtPcAyBGerTY6aHdG8yL5nsczJvGWEwMNraNZ77V/fZg0fSbpfsI/RhlkysQMij7E0gkDaLuQwTJF23XYHRscmzp2Bj7sjzgeaxA9yfeM+pi4IbgY72WDOZD87hQoOPXeU+M4fl/KPqdjPWYfhp8dRhZkvoSxVTqSoONnFcQPyTpLEpy/ISP67cGG6k8utGlPEcWL8FR0hwYVPiAmDONJMmDM/fZRsSzncKhDf4YXL55jPpvh7OxMihCFgkaz2QwAYkGlu7u7qEtIwzxnZ2d49+5d7DXinMNisYiGgCEQaidIQ1Wh4ugk0G/eS8YJ2753TR2NDVNkaWRENV9hGwqXzedzNHUTvQmGedLcfAIOTkbqOHa7XbwGAgVeK/uvkHVgwTCei5Utmd4aRVHBoG42G1xcXMTQELUhk8kkdtF0wcilFUX5vNM6C2nNiZQBYpfaWFdisPAQx4znYHiEGgeOQ9PUKIoc3/ve9/Czn/0shqvSkEWMR2qpX5EFgNQ0DRaLRQw3cXyoW+GzGy5UPbjX/WaD47HGbrfF+dkK2aKv9EpxL70+ay222y222y2apolZTD4AyfliESvF1nU9WD+AMGqr1Qrz+TymGKdULMf84X4NC4226WJ8+ng8wiVi3yGdqiGlm/90NRxy7fGnOJ95EGClQFW2Bxdy+I/wXqp0Omfxn/1nfw+ffvoLeHb9HLPZFIemge0srGvRNK3oJzJxSJwTRoGsETD0AIuiQBme+/F4gHM2XEvIMsmzKCbN82xw3dyLUv2SNqJXev78A1xfP8Pb16+RKRXDxN/mGMy34Ki9f/8O93e3WJ6dw4d6JDrLxJsMc1xK44e5Ga7RewtvA9jgdSgFDyMZI07Ko8MNjR3XAecyQa0xwliQ/eD1eu8fedTaaCTJal8xRwK749NQ35AJSMclMgsQG8HXhutUBVFveOZSzgxyqr6YnnNZ1OFUlUXXSYiqrGu0oV/X8XjEdrvHfrdH29oYprC2nwMAQ1d+tLb6P2mIc/x3+vv0d+nPvL9UgJzed68nelwmncf4Gsbjyt/RuR2HcZ4CTl/1+686vj3YSC5OGwPbdpEm8j4gb9tBZQY6IHrJJ+ei1bF+frhtlGUJhAXtrCyUoihwtjzDdDrF9bNnsqlYh7vbW3jbYbVaRcN8d3cHANHbXS6XkVY1WYZXr17h+fPn2G630QCzSif1Dk3ToO06GOeiAbsPcf39fo+u63B2dobj8YjNZgMAkX7kQc2EMSbqO0iT73c7aKjIQoxTXmlgUzEWQcV8Psdut4PWOta0oFaBYILgg8dsNoNzLpYdp7B1vV5jPp/H4mrPnj3Dw8NDBDOcdGmLewURSRKI8Hmzr8t+vx8Yk7TBDw0tx4fvSdOuOCZkh8bN34A+s4Cg5Pnz5+i6DtvtNoZ2+DvZgGUOHZo6Clmbpom1Lzjeh8MhhmE2m83gujwgdVe0QREo8q5tsb67x3G7w31VoiiLwQYZWS9rsdvtsNvt0CUCxtl8jmfPnkWg8fLly9iBl2nSRZljOp3g7Eyqc3I+xCwD3xeqmlRTWGg0tWyU/E6yPumGGDcuLYwHDdWf5jH22tKfOTa8h5bhU4i2xDsR4Falw5dffo7/4D/4v+Hf+/f+9zgc9qhmswh8+6aFCkfdOxVpP6U8p37HxrkjQOwQx5F9Uoa1OEyk9Qmc6aAAAgLLKsNUV8iNwccff4I//qf/A9C1cg/fAsSd8la1Apr6iC9+/nN8+NHH8CoXw+wcvE6KmPn++XHf9NbBwsLZNmAehih8IBKU1NnQ/fem52KYUZjFI8rCYFLlUedC0JHORY5NChS+2b3ryFIMWIknEIvSBmNDOR7LuA5FdJMMrPxljIb2CgJAMlibScfx6TSCjbZts
Zg3qOsGdbKm9vsDlOvXcZrV8VX3PQYbvNYx+5A6Neka4e/GnxkzKWPGeAw8xqBkfE3f9HiKefkmx3cKo6RfnIp3dLjRxnso1w+cJur3IlBirXw5h8PxuA9ZCQbT2QTz2VyyS6YTGG2w266hF0vc3t1iUpZQzsSYPbM15vN5QKwV3r9/j+vrawDAbrvD5dUV3t7cYLlcIMtzvL+9xWq1ks0OPhTA8bi6vsJ6vY6GjkAhFWkSYDRNIzTmaKFliVGmaLUoikRg1rME9KZT8WC6YdJLXq/XEQSQDaFh6roOi8Ui9nOhp85wz/F4jKCmbVssFovB9TrnsFwuoZSEG8h8cPMxxsB5xBDRbrcTcIg+hMOKspzkZFk2m00saMaCV2n9kshspftCGA/WHCEIIfuQZX1lRT5jZgrFsJwRr7exFmfh3owxuL66FjCTS8n2tm2hIZU5t7vtIAtIKQUXmk9lwVgppVBrjSaMddc10PukdD8QhYYAYpox0Ge0XF1dxUyo9+/fR+aryHOcn59jsVhgPp9hOpuJgaG3oaRkufVWaE+vYLTMj6btRdpMy6ahHG8M3kuxp++6YXz1MQxlpOwCr6dnlbqQPWGhwDLjQm+XZYW2a/Dzn/8M/9F/9B/ir/yVfxU/+tU/B+vYgE1q/TjnYbRCWeTo2hxd0AB455GZbHANXWdD5lgTs6/IemRZFlMwtVbokuw0ghiu0TzPUJQlisKgqWt8+MFHODtb4e7mJgo4v+nxaNMP4EEr4LOf/gS/9ut/HuV0CZMbYS287sGGc4APlUW9VGCGtUG/EUJAcIPwjRpGtwZMIICYjn08HnE45iiLDMcqx7FuUFUlqqrvkqtCZo7JFHQ0eE/PqXQuAIBWJv77FDgdj402p8c13Tvjn7Cnx9T29G+EcAA8MmdgPVCqDK3t95qqlHlorRSs2+122G53cEXfvbHICxjTxOfx1D2cDmUMj1NAJNV9nXp/mr48Pk+6zvj6GLgMxnbEWo2v6alr/bbHd6ogqk3fdyAP+dReeMxH1I0GBlqOLMvYKjBUGgTmsynOz8+wWCxQlRW06Tfr3X6LqqrwsL5HWUoapIbCbDbD7e0tPvzww1jRM89zHEP6JEMAz54/w+dffIGL83Pc3t1hPpthebbE7rDH1aU0lDrWNapJhe1uF/UcAKK3T2M3Rp/WuygK5QNMvShOGoICWBeRe8ziCGNZFAV2ux3YbIv1K8gi1XUdN0mGQShIPB6PAzBCzyPLMmy3W5ydncX39yGJJtbWABDDL9PpFACwXq+jXgKheiPZoH4u9Fk7vC8+B4aJeP2z2Sx+F8GMjIONmxQXEWnd5XIZx4aVUlerJQAJnRH4zOfzyFAx7lyWJVYX8syzPMfl5SVu379HURQxzZZMTp7nKItSUhGTDVhqB+g4ZtH7yjI470J2Tb8ZsNfIYKNRfRGp6+trnJ+f4+7uDu/evcN6vQa8RzWZYDIRJuP8/DwWkGLGi3MO9VEKWPVMjyjIre1wPNSDqrSpIR1Tu0o5dF4Eb5LqjSG7cWoz+QYG1FP86IeeM+dJupEBrJgrheekp4ncj3MddrsttDIwK4P/7v/z3+Lm5i3+F7/92/iVX/u1XswGoMiz0AdJoShyMPvG2x70pWtF1mQ/PmziprXob3pvvRuEFVIK3ZgMZVFhNp/geDjiww+/hxcvPsTm7h7ed8LgDtiNEEf+uvHjJu86GGXw7s1LbDZr5MVU0tTd0AOOpFT4QYXS4hJmdiIW1iHEghCpH3mlYyOU7m1N06DJNZo6k/2xqjCZNFHTwYaAnNsStnmc6aQoLAz1N3p932mwkc6ZdFyyfBi2ib8L0y49h3R/leJpfM+jZ+IBqL7yNZlC7z26Qmp1dJ2N9zqZTGGLA5Q6At5jeXaGzu9jfaXUiJ8KaaT3dGo9jN+XvvepcAZ/99iheFpPkjIf6Wvp94zT59O/02fyVQDq1PHdinqFixTlcCiprRQsvXR45EZ6mNAj8wGcTMoC+3qP+XSGQ9vgfLXCRx9+gMVihqqqsNlsYJR480VRSIaJNpgtptKISmtMJ2Iknz17Fr1+xs3TRmZt26LpOhRViYftBoszMVTHpsFsMcebdzcibMwzdKFwzMPDQzR2RIhp1c40BbVphd2IXm9IBU0/R4rfWovCZJGRoVGnN82wS+pN0VvledKaHGQPaIzv7u5i6CTVABD4CLWcxxAGQRG9NoIpY4zUsghlyLkZENiwWy3PQ2YjrWSqlMJ8Po+FrHjt3vtYip3MjlImGgJAwBoXb6pHIXV+G1ipyWQSr5lME1+Pz6Np8MMf/AA3NzfQAKaTCWaTCbZBuJppDdd1yEIIQxuNLtTFEPAFmLyvyKl08JzDBo7RQjwVz+RCfvHiRSw29+7du5juPJ/P8SIUo6PIVgRt0oekaeoItAgY5Dl0qOs2sgEEebxWgiiyZQSpwBSH/RbH4yE+Dx+uc9Cj5cRxivaPr4Vbp6BvTIczbNGD8gJ9kSqNrmPYTOF4PKAoSmy2Et67efcG//e/+R/h36hr/OZv/nPCfLQNlJf8gDLLYaZKKkkqYQ2t63sO7XY7uUAFFEUZAX1VlTHUyb42NqHTmabZNHW8V2MMqmqKLCswmUzhuw6/+Au/hJ/90R/heAzhi0T7KV97WiNz2th4lIVB3bR48/JLXF0+R1MfYYop4Pu0UoSQDdkhOA9tFJr6KELNODflnJIOD2gMjddYO0Bm0jmLpraoM4X8mMceP9THCfiYxBT/PM8BR9D42KgKQNTQIVuJYxnv58R4DOabpwFHL4T10ruGnxXc5eFgA5Do16u1fcE5fo2IPiXbLAXyWmtUVQnJpOsi+O+sAXAEIALt1pu4d3ONpfeSstanGJz0GaTvT8ci/TvVb6TPbBxOGX93+t4USKS/H4OWFISk70nPT+b9qUyb8fHPVmFa9cIcblgxT5tvQQSfMAExF5l4JL/2a7+Grm1weXkOpYD1+j4YP4PJpIqAwXuH3W4LQKoxHvbS8IwZJcfjMW7Cx+Mx0vZlWeJh/TAwzMwA2G5DqmnQWHRdF2OINMrMXgAQ9QJpJcKqmqBpm5hFkhbr4uRieIGbP40s9QU0xHyYPKhdqOs6ClJ5j6TQ1us1VqtVFGNSpMr7SQWW/I5eRIdoVFMK3nsfdRg04uW0L5Y2n88HRb0Iqngwc4ShDWpTUkRNFkbu18bfpQuNFGJaaVNy+iU0RaaBnlbXdZjP54NwVh0M9Ww2w8PDQ5wzy+UyhpSY6mq0hi5KuLrvPaKC1xZDYM6BqvToNaE3IzTYiiGqkH00nU5hrcXNzQ122y2gFK6vr3F9fY0PPvggGjpAgEJdH9F1Nm78vP809DXQJ2RZZGUISHRi9NPQXp6XUHAoijwWOjrs96cN3wnP5UlqOLGnpzbEdL5Th3I47KU9QLJheq/i/R6PBxyPoqnprMV/9V/+F/jRj36E6XSC/b6VyrjzRbjfPIAzYYCg+nRkFfYpk/V6Awq1yYal+xbZkN7T8xGU87POAQoGs/kC3/voY8xmc+z3OxjTFzVLx6YPHJ8+OGZG
ayjnANfhi59/hh/88FdRTnPAOxjutUqDLWqVUvL+kBYqnVSl/4hSUpvDgwJSeVtKsY9Dbf3PHtY7+A6wzqNuWjRth8OxwXQyQTURTUM1aYIAN0cZdB1ZlidzMNEHgex2DlZpHQtEnxidAXARVi7wNYphzL4AG5j9owhmQ2E26uGUivNDhq3ff2KBMCfAl8CzaVrYqL3yOOwP2LldZIQju58cKdBI73M4zqeZglO/H4ODU/MnzokTzMr4Gk5931PMV/pv7jF87c8UbKTZDN66+IWP42fhxuHhleRQO2txeXmF58+l2uLZ8gL73Q7OdVLroW1RTaZ4/fpVKNYlKV9lUaBtGljLPgEW0+kU+/0+Uufr9RqLxQK73Q5VVYlATitMplMcjwJQmBJ7eXWF+5C9YrIMd3e3KANCp5FLK1lyUaYpi4f6iNVqhbqusd1usVgsYk8Uajt4PkBYjNVqhYeHh0HopKqqGKZIPXZmyDDLIX3wrPvB++fGSkAF9LUdCCjS4ldcAGRtuJEWRRGFsWQhVPg+alfIHPAeGEoiM0PAR2NGkMHrJKATAKVgTB7Zk3H8k+PQT3QX2ZfeW0cEWRS4SZiqwHq9hg+anjoAts1mg7IocXd7h+fPn+PNmzdi5Ls2ftd4E5SNySdtyFOvy/eeZBpuU70Q8tWrV8iMwWK5xNXVFVarVcyOYYM2gEXW0FeEdMzg6ane1FB0nYXJcyjXl0DnNXP+TadT3N/fo65r5HmFIi/AZlesMXI8HIaL/FtSpPzMVxlVrh+l0r4+glJSzzQNydVNDb0HvHf46c9+ir/xN/4GfuM3fgO/+qt/DrPZPJivmOAJrSWk4ryCdWlcOkNR5IMQQJb1GT78vlQwaR1F6zYySwJOqgAGDRQ8VqsLzBdLvLt5e2JMvs0Ail7D2RaZNnj98gscdhvMluyVkrAFMR2xH1vvXUh7ld4rnKtSt0XSZAmg0z0mBVbpYUwGgKE26RvFyqpNMMCHEK4uigJlZkLIYRLBHPcFYzhvOd6PBcS8j/Rv+dnB+8fGU9ZJF59dNHwKgPKhtUUPQpRyAeRIan8bOsb24TJhtG2oC2Qty9l3sNZB7VrAy9zc7fc4omcceS88xpqKU+DhFGB4CkyMs1KA06wUXx+fK2UlhmPoH11PymqMfz/Wg6TC7K87vkPqa4LiPKT41uiihK4LCFtJDFMpj7Y54ld/5VcxS8Rvu90WRaZRH47Isgyz6RRd0+D59TPsdju0IdOirWtcnp/j7dsbnJ9fRI+Phnqz2WC5XOL+/n5ghOuuxe3dLZbLJTYhc2Eym2Kz3aCaTrA/imr/4uJCCofpLNag4Eacxuu54QO9YSE7wc8xPMHNnhkP+/0eQF+pD0D8DmoU2IjKe0mXZaZHWZbRqJPBYBYKN0myL1SNPzw8RL3EbreLaav8PIAIkDiBtttt3CxSwOPC5ygK5bikIR4uCIJRgovj8RizZgg6mIp7PDbR8KWKf3oMvbEloAW22+0AuDDFlnQmx3W/2wGAZBd10uNiGsDcbrfDbDrF7bv3OOz2oKKf3y9zPDT4erQRC5AYL17OEWttKGs9jc93Pp/Hnim8191uF/VGQN/jQOpNGNR1EzwjAjkfGQ8x2r3hoPHg9RPsclyYUdV1Gazts18YduTzk3XtBwDq8RbwhAUNHxmPS6+B6ZsJMmyWZUyllng/1wIBlnMt9nupBDqdL/GH/+THePXyJa6vrlCVFYyS0Ffbihg3Y6q0A1TnIuPKcB6F6MI0ZnE9p3RwOp5d13eX5diK1qlDUx9w3LeAUlidrZDnBawbl06nZuObog4Pby2yPEdbH/H+3Vs8/+hTOEarfF8uoHOA0S6EtSX1layKD+Fs8e7JbiCyI3yO9Lwfg40ewAHDPYshUWHhmFYvzEYVsr4iAAnAjuxRCgqiM3rCSPNeUyYjDUfwOLUGrSNYCCFpJ6Lq9FnazqFpa3RtB+vaAYNoO9bmIMCSMcxrD48VFCQrzZn0XtSj64ojecKYP3Wk8/AUMBiff7wW+71h+P2n/ozH76uub/w5oN+vvunxHVJfH198jFWllArZDfkHPDxmszkO+z0+ePECt7e3YVPX8K5DUcpmsNttsDo/x83bt3Be4vHOdyEkch8ZAGOkONfd3R0uLi5wfn6Ot2/f4vz8HPf391itVri9v0M1meBidoH1eo2Li4u4qVZVhVevXuHq6irqGqqqwu5hE3UR+/0+Gk0OLgfYORcLRpH5OBwOWCwWg0wAouaiKFDlRTSGqT6DE5X/9r4vMJUyEmQgaNDOzs6iVoMIHOhbwJOpoNdMISc3DaZnsjMm04KZHks9Rp4X2AcdR1oqneEphqI4cTmWvCalVNR/UKxHI8fFyklM4xlL3iP1EoCuE20Cs1tms1nU0XBj67ou1uBgBtF6vcbz58/x7t27mDZNo3Z9fY13t+/ReQe0iZfkZd5y2VL4KJSsglJZrJ0Rl0cYm6IssVqtsFqtYphkMpkiz7Oo7WHxMnoLnCfiPbbxtTRkwvf22TkZmlGtlrSqK3Ugs9kM6/Ua2+0GZ8s5rq+v4jimPSQGtDr/fgp0hJFJw0ipx8r9gXsC9URp2K3fqMMZaVB0EBxahpJavH37NoZN/u7f/bv4q3/138BHH34UCjVZUITYdU7K4WtJr5exMmG/kTEr8iKGUFi6v9/P+mtLxX+8tslkAoUGRudg3H+xWCIvcthj+89UwaRnKiy8V/jJT36CX/5zvwmdZ8laIYsMGCUhBDippCogkefo70UcPhVDfKmxOQ02INVPg+ZEGSNdaq0wB5316FwHNB2MaVHmBkejsQ+sL+sRUdtBTRGBX6qT4HPqGQ//aO6wwFbKPHEf5nOS/dmhsy6pkSGtLehkyX7cF/hyzsL5Dt73pemtDayI6xuLam1QdRrACgAEwCS9UcbsxqmwRvq+U8BhMPaJbU3tQ/rMTjEV/XPvv28M4NJ/PwWKToGVFFSlLGDKsHzV8d3KlVsXKqi5GO9GoJi986GGhgK8TtTuYsw++fQT3Ly7wSQpxiS6i4OU9C4r3D+ssTo/R54XeHhYw2Qam+0eq7MVnPM4HmucrVbYbDa4ur7Gev2Atu2wPDvD/cM9Li4vcX9/L2g6y6CVxvlqhe1mC6UVppMpbm5ucHV5hYf7e2R5jklVoakbmDzDfn+Q+gmZQWcFDWcmg20timBcdWaQZbk09gpeONuIc/KnEyTPMln4TsIFRVmgMAW2WwEUeWZwOB5DN02LY2AtttstqqpEZztIy24bgR3TeTfbrYSZknRa73s6msxG1FcohSKEd6rpBG3TBmNYYX84YL5YABAa2cPD2L4sbjqJWYdjPp/HGhUp1Z8idOpICIDW63VYNAr9vJZFrRRQlkVwsIcesjE6hAPyEPfvM2SYusu4PIDoZZ2dnUVxKYEuAKxWK7x7905Sp3dCTwu1KllPPnhK9MKip6oUlJYy3JYqfKVlXcDjWDd49+4W+8MRk6rC1dU1tJaaIw8P61Dsqw7zf4H5fBKMCFDXTWTIyB7x/jmeLEQHAE0AtM5aAQbeR4W
9VjLfjNaYTac4HvdhTsxDeEBi1HleRHHkY+V+YgRkFgQAwiAGQjohPxKMRfg5blDeo26aWLclQLf4PWl4SOxmv5kpreFsi+Nhi7Iw+L1/+A9wf/8e/9b/+t/Gs2cfIDO5dJd1Yii8dYCR68q0kv48kO7O06pCWeTItQk1fxTakFkkLIlFE3QizoVeI170FHmWh5Ld0utDejoBWTVBUU1xpEAzGYtvRWwoDWQZms5CGYMvvvwSm80Dzi5KeG9h4OCdAFxlFJRy8LYBVAflLLRy0EoyvKS+hsxVqShqYn+M+HWJ8Xhk9LSC9knFUMha7Qt6hXblHqidw9F1sZNrWRSYHo6YTieojkccjkcUZSlC06qSEJbWyDMtbRNU0JMoacoppcY9bKjU2QUnkX+s7eT5WIuu7VCH0KmzNjAb4qS1If3cpQDF2r4cOoCuayLj0QMc3bfk8AL+UieeiRFjFiIFFtEJ7yFkYGiY0joWrPKKUsCePqe04JlCOqn672R4aVzoUASzDIN5T2HqYPLF6+xfTzKfkLK53z59/jtpNphdAu9hlMBf1hNw0VNPxEGQG7u+vpb6895iMhHPfT5f4P5eGIvJdB5Awwr39w+YzQxMLmLO+XyOphNKf1GWuL2/w2QywWa3RV6WyIoCdw+hCNfxgGoq9QaMMbEuwvn5uVR13GxwfraS6qCVhAy6totVMwVkBEFYnqNuW5g8h8lzdM4CWqFgee5ODDU9SO99DAP0SuZMNtngdRpk6JyT7B0qyuGTrI6wwJyAAeek94wN+fRaGzjrUE4q7I9HFFUJ19koBGX4hp47jfPxeBRNR55Lf4VcrjMrRAhaty2qqXSUnU6n6JxFbnI47yJjkNbxoJ6FKbtkPdL+EwxJkDVhHj+Bymw2jR61GHQfQYV4FWk7ZA3nbGRHKKJNq5/yZ4ZZKHLNsiw+37OzMzjXVwxcrVY4HEWUKa2xMzHCPrAKmkI08eoIqh00lM6gvA2t1vuSzR4addehWW+w3mxR1y3Oz8+htcbd3T0Oh30Mg0hqpRj8uOxV37AtzWhKf0dDbMO4G3o7rq8I6J2D4t8Q0HvY13jz+m1kV9qmQ5GXsrl3oWaDfBHgGY+lRUBoFKdDK4Kw6SU4bLh/hSwX52IdHunb0qHIMig19GDHsWJ645Ja2aFrLY7HDGUxxZef/xz/z7/zt/Gv/2t/FT/60a9it90jzwsoH7qhWml5rlWO3BgUmUaZ5yiyDJmW9xioyKRZ59B0HY51EzJabKiVIh15y6LEpKpgjMZ0WqEsc6zXDo3tkBUllueX2Ow2cG3PypFo+CZYw0PBekDpHFZ10ErjUNd4e/MWq9UlvG/gtYZCBnjRjBhlUXcHaHRQqoXWUl9DUd9hwtxVSuarfRyCSD3ZAeDwPXsl09rDhMZn8hwBJe26YUPvlaa18G2HtrNorUPTdpg0LfaHI6qyRFGW2BdHZFmOoqhQVaU8M8X2710If3SRrZC1ykqesq9IenIXgYc8L8m4cc7G36V/0tAYDaU4hzYYUwLk8LNCn/njH0Hw8HyHHv+A6YdohdL1KgMpi6UHJqlmiSEjM/ocMCyZLkAk7bLMI22MxvMSHMh3qHg+viedD6GDTgIyerDhHNlbBaVMeBbfDHR8pzAK6Sd62PKAhwhP62HciAZKKGWp+slaCZeh3oWItizu7u7x4sULrNdrdF2Hi4sLbLfbGCqpqirqM6Rfhg7K9Hk0MKmAlFqD169f4+LiIoZimI2y2Wzw7NmzKIgEEDd2GivGtReLRTSY7FfBSUNDl4YUUjEWDexisYC1NhbbAvo4e6p7SCm0cSt6gok0ZS/dtCkEpTi0CFkRZDbaIKwiHc/PclyYxcG02bQUewqqNptNzP7heBA8pH1ZOLbUnjBk0LZtDIul1wz0GUC8B86vNAxEOpGq8TQbZ7lc4ng8YjoVQMN/53mOu7s7dF2HDz/8EPf39zjWUmn0EKpuZlkG17Yhu4SlfGXB0fAq9JuKRUI/UpkXmBkLj7vb21iVleEjAlSCQ4ayOB/SfjOHIOBMn3GaMsd5wLHjeuNzIBNCAe/NzU3UFRG0kHXiZ+lAfLOj96we/0o9AiBKq2AchvVBxvfFdeW9h8kUHIDDfo+ulcygzz77Gf7W3/p/4K//dY1f/uUfYbPeANCxqWJWFtDaI89N7A7dixbNMDshXl5flVQAbF8fhh2OrXNgJ1QJDRS4uLjEu5vXaLs2jgc7s36bcbTOwZgs9MsBvvjic/zCL/wARSHsqOhxZOsWrUsIMwLCZHhEHZRhJVV4eEvD2NPeqVCUx9dR/E8dLrB6nHPUdOz2+aBaq6TKlihDYzyyjJybfTijD1unwINrYFw1dhxeSV9L59dj+r9f0z2L91hPMgRijzM1CGiG7xc2Y8wCPCXiTDUQp0JcvI90H0xB4/hZPrruE6+NdRdfpTcZvz4Iu37N8Z16o2gtnk1mjDT28o+/PP03r10rjfu7ezjfweie4n/z5g2WyyW22200Qj/5yU9weXkZ01RXqxVev36NZ8+ewXuPzWYThaCM1dPg7HY7zGYz3N/fw3uPs7Mz/OQnP8EPf/jDqC9gZsV0Oo1Glv1V2EeDTABFNwQ29OAPh0M0sAQAzGKhQR57DEr1TdBomPm7Pu7YLyBOuDSPW2sda1g0TYNJNYELvxv3d0ljdPweY0xkUqhpSHu/sBcLa3+ki5vPFED8bB0MNcuXpxkRXFQEePP5fACwCF5SoWOqOaEnky6osZaBPxM4MVtmt9thuVxGzcKbN28i4GKNgPv7+zCGFequGwCZcQwzej8+hArDZsExsl0nsW2MFqn3QPCenXNSmC6pU5JuVARczJpIxztdV2m4jj9z/Aju6BlxXvGZCEPVREaOc5H3wnlirR07TV9zpPRr8qq4YPH7lQqNu2zfiwPex67R4/kv9x2YIw/YzsO50L7ee9zcvMXf/tt/C3/tr/2v8Avf/0UR1sKjLPKg1ZBU+qLIg0C0F9aqADJ0cn18nUZCLr8fGz6DPA/1JZRCURa4urrEF1/McDjuBx7oN0UbkcQONTPgPOA8vvz8C+x3W1TVLHjtNfK8gFY+dt6El5Cn8hhcI+8phpdkYAeOTLo3jOf8qeOkgNGrSN0zHZUiza6zaJo2Fio0JkORlyiKMo5pCixSwMB/j9fjKRAxfu2UEUydw3S9fNsjXYvjkHH6OkMlPAhGUjbk1JGyI08JMU8BrRRspOcYX1sKbMYAZ3yMAUUKgNJ9+OuO7xRGGRRjaYfq61NITSnAaw2TGaxWZ3hY30saoVKinbi6wvv372OhK7IZzL6YTCa4u5M0xdvb22hACCoOh0PUS9CTBhBrPNzd3eEHP/gB1ut1bI5Gmv2LL75AURQR7KSgIPWWd7vdgGUA5GHPZrPoAVEo2rZtDBmkoRSeO+1lQoNNDzY1Pnyw4jn1PViYecMqn+vNGlVRwGgzSN0de7c0PAWAtmujMeN1kakg3U/QREOWbsT0yJnJQvaEYlQauHFmzvF4xOFwiKEMnpPjw2fIcE3K8qTahXSyn5p3NNrMen
DOxYqwl5eXsUEaN6VjfQziQRuZEa21VMulkC2Jr/JIjTT8iGodeSQEdOl7OGeOx2PMdJjP5xHEplk+ZCE4LxjWIrhIsyg4fulG3W8k/TUzcyhd09z46e9904OG7NQx9hDl2gzYEymlpdP389/WejhvoZWB9Q5aZ9DaYb1ZYz5b4u7uPf723/5b+K3f+hfw5//8n0dRVhFkiEixCkxEDzSEFvaAF60G1yvXMwEZxzsV5fF5kJEqixI+tFl4eLhLnnvs1/2Nx5HtHZTWsF2L7eZBhM3nV4AHWtfAli20zuGp4YBNUoBpBACvRDtmSc/jsVgxBR3p66eOscEfP1vG9flSf24Rd3ddA6VaHCDrLZ2z6R/ew6nvORVyS68v3T/T/XxsVMdMwql7PXW/gLBmX/WZ3vY9ZkdOnTMFWU9991exDONzAT0o4DWM7zW9znSMU4djzBCN7+HPHGzwQmNKKIYTLp1AnHh87ec//zmev3gGOPHqz8/PYy+T9+/fx9zs9XqNs7MzHA6HmMnw7t07XF1dxZbh9Ko/+OADvHnzJlJyrKjJ2hdd1+Hzzz/Hxx9/PCgvvtvt8Pz5czw8PABANNRZluH29hZt20bmgyGX4/EYS6R77yPtx/j/YrGIGSn39/fReALAfr+PtL4xJqZ/0sNMDQQBCA0nDSUZnYeHB5yfn2Oz2fSei+oNCj19boZkL6QCYolubwcGkGN5eXkZs2pSTy6dlLwm/p4K/YeHh2jcaMB473mex1AZmRkW40oXA9mZdAxoHBhKSMFPKsblfTvnIvgk60IWS2uNV69eYbVaDdifoihgoNHZrg/FWIv9YQ8XQk8+Mq0+yUzpGQkC0Ueb9Wiz4ZpxzsU5x7DcZDLBfD6P84XhuzTE1Neo6EExqei0Tkjq+fTeUQgBqWG2BXUwqSH13qOpD19pfAa3ObrX9P7HGy5blDvVX9tTdC6ZDwUNr0UXxtCatRbbzRYfffQx7u7e4z/+j/8O/viP/yl++Ms/wm/8hb+A58+foShKSc0sC7AbNcGGtVaaRya0PK+FmWTeJz13TF8ZlamzHK+uNZhNpwPHwH6FMTt1ePgA9FvkZYXWiXD11csv8b3vfYr58gzea5iQeePh4RmKQuiHAq5XD995QHm4oKHT+nRa8qPr8E8zG2PDKw+rB86poR8DAb5urQOadmAr+LtT3zMGJel7+e8+pbkPScbaGclaoLc/Zka+6h5lPNP3PTbA6d/cW2J4ZnSkoOJU2Cddv+kz+irQMR7nMdghOz5+X/odYyYkvcb0WaSv/5mCDQ5OpGm8h3JDSo4bgVDhrPY4wbPrS9zevUeRSfW+w+EgNS5CmidFfhcXFzgcDri+vkbbtnj37l0MqSiloi6AOg6mqbL3BFNjWfyKWgHWfGBp8/V6HQ0TdSQMs8zn8xhj76tYZtGbJxggm0KgpJSKdQ2Y/nU4HGK3WIYHuoS2p3iR47tYLOKGmo5pnuex0ylrYrBbYxqCYIgk1ZV47yONSRCV9gTg5xlW4j2lBmiz2UTD9/79+3gPrLfx8uXLKMZkSInaD46HUmqgo0jZEX4/dSM8Lw0hM19opMeUO0WxbEjG8czzPDJb8/k8ahsoXN0fD7H2C0ukZ3mONijafRCKYuDdPtZQsLaFDQAlbkKjxdu5vtIoAS51NZyXm81mIMrl+BDQ8NypcUvnVBpi4bPw3oXy230PG+o5OD/osTvn0DYa3vdjLIcHtDBOovJPvCiPR8Zm7N31e4eCd8PwSjpXoxHiuMlFCEgJor62bWBMhi9ffoHr62tMqgn+/t//7/HqzSt4ZfHppx9DmxJt22A6nYDlrXtjgCB27/c3XoMJfZ+yLA9t7XvmR5u+4qgx8twPAPIiHzwPimJ5j8NxfExbSz6UCy3hpWpo2zZ4+eUXIoRsGhSFZC51TYMs6+C8aOgURNwrlUN5bzJeXj1mV04Z2q8zZE8dfZED+Z+Anz6U0H9BeITy4yOv+qmD9iS9tlQjwX2SZdXJFNNOpDaLx2AOJPN0bGRPHX2I7bQeJGV0lHqszxiDjVPsxfh+07E49fr4PekaGt87x/sUa5HeR/qH4z1+7ZvOle8ENlJqUXlJhU0ffIr+ZXEi6ir+8A//ED/85R/g/vYO1lpcXEgNDAIAenFp2GQ6neKTTz7Bj3/8Y3z88cexf8lqtYqZBovFAgBiUSxuDBQv0nCvVquoRWiaJtaTIItBFEyP5RgadpHOJ/hQqk91TQ354XCI+g+WqSaIODs7ixU12aWVYZ3JZBJLhJM5YJnv9AHT6C4WCzjnYkorFzj/vVqt4j2yFgjz3/f7PdoAAAnuUu/57u4uNoNbLpexBoNS0vPkcDhEcS6BAAHEp59+GkGm9z6Gxo7HY+wuWxRFrI/BEArZjIeHB+R5jtlsFpvIkYVarVZRMMraIql+g4a+91z7RUsWyTkXNyICUs6HQzOseUFWTmkN33USQx8hf7Ir/Hlc4ZPvG4cJuI5irYdg5He7XaxIy2eZbhTpgu/Xl42ACkCczwxHHQ6HuAYQPGGEObbf79E2DXQA2JwDsUqwVqFZ2nBDMQljx0okzrk+U2G0YXOM+KyhEAooDSsweuceGcV0vPvrEFW/7Tp0bYsu63B/f4d21kBpjZevvsB/+p/+J3jz5g3+3X/338UPfvBDKCVsBA0FHU/rHDrb66XSGLnMz15QOgRESR2G0EiOIVmTUbynT/q2T3nU8jsXGAgLoyXj7fb9e+y2G5TlBD538LaDh5ZmgM7C+77Ohg9PxAer7r3wHRqPAccpAMTjq8IAp97vfZ/5IP9VEcR9lYGkUUzn9ZhVGF9D+rvh9/dMWc9MPR0GSD+X/o73/qS2YjQG6XUPAbaGMakOyMX9avzdp+7t6wDFV/0uBU8piB8fpwDnKRA0cJa67muvb3z8MzEbOmU2Am01vkgab/FEO3z00Ud4+/YtPvrgQzRNE0MYd3d3uLy8jOGJq6urWHCK1UBZfGs2m8H7vqkXr4cGBOgBD0VJvAZmrbC0OSfmdDqN/VJSURLvlywHe34AiMWj6PVzQjG1M1X2U+eReulkONI0VT5IFn4CZMM7Ho+xAmld15EZ2Ww2YoTrJjISBCgEDAQtvN6mbVAFupfGiMCAGhMKYgkC6e0SwDEdkxVclVJRC0PGhHoCjinjgUBf/tw5F8edc4ghs67rIivCCqhph1ka61TXwjmYandoKLjg2FuFQLPrOljvYuGn1HMSz0SFdM8E/Yc1Ftm9ZG0w7JOm4uJEiMBaG4EptQLM1OHGm2aXpLqKdFNNGZ6U2SDQ7BtFxW+HyXrhbx4zBPry0gSnWmlpbNVfeNw0gdDBNwEYCv0zTkFGeAFaa5ydnaFuamxHzB2P1CPjd8I/Lh7kvYKU/FDw3mGzkaJlWilM5zMoDfzO7/x9GKPwr//r/3P88i//Cp4/exH0HmSVHLq2i2Wth4DDRoOVOhupsFDeG5qcaWHUXrx4gV1eSCbwt9iM4xD7BJyE+9vtdnj55Ze4vHoGeI+uazHJK1jbwbpO1Boa0K7PgoBXAMMnkgv7CMeNj
WT6WspQjQH82NCkwsfxPY/DADznKbA1ZrdOGfQx+5CeK2X90uf5FLhIrzv93diLH8zHJ64//Ww/LsNeYVy3X5X981XA7NT3nXqd5+qv43ThtvT7UuA5Bm4pcBmLdVNy4auO76zZGMSmgpdDOnYcX6M3U9c19vsO19dSdIsGYbvdYrlcxmyGm5ubeG7e1N3dHT788EO8efMGs9ksGi56umkWBMMBLNV8fn6O5XKJly9f4vLyMtadYMEnajEo6uR30wAwbEPjRk+eng4NGdkTMhpN0+Ds7AwAIuOxXC4fxYbTMITWOhpQerys2UEA4VyfIjabzSQctd1FIzGdTiNoARANeZz0YePksyErRABxcXERx8Bai8ViEYED9RkEUSlAIDvUNE0ERvzeq6srfP755xFEUnQ7m80iQ/T27duoXaGnT4+dTAg3krTyJkFQX+1PR/FnmsXDyqY0GgRbxhgUeYluf4zVVI/HIzx6oOKSzIwx2EiP9Pu4CLXWaINQNV3czjnc393FcBHXSVmWkbniPZ8ydATY6VrkRtd1HbbbLY6B6ZNrMkAohkGAppQKVWLzAaDh75Qa3aO8ABOuJ/1eYUKSLI/EEOR5Dqv7RmdFUcDbDof9UDdEAE5hMsdMXsuC5yzZKSoR7GotfTK8d8izAsf6iO6uw3K5wH/1X/+X+MEPfoBf+7VfR5qS75yDdaHAU7zHYew9BUNMT+8NYVLHwUtK6Xw2l9oxeYZWrhRPxe0fHz78X1gbFeqXaCXpsO/evoFCqKcCSPpwywq2gf1KhJl8VkppeG1C7GJoVMfG7ZRxf+p36b/lWaUFrABA8xLiZ5T6eiM6Bhpj5iH9exzmoHP7FPhJDSnfM2YA0pDLU8Dn1DWnR7/Ge10D5/FT2SXpvY/DS6eA2VPXw/tPHeanznHqnONxSIHZeCxSEPp1x3duxMajaxpk2kAHyp6bJi9WKcSiLbPZHFVZYLtb42x5hvXDQ9xgsiyL4smyLGO2BdC3Sb+/vx9kMaRlquu6juma9IabpsGHH36Iw+GAh4cHfPLJJ1GLACCyJtR1kDGgEUu1DGy5TsBBQ8D88XSDTEtzMyyQZRmWy2UMtfA9ZAXo6fM8vLc0NEBDyJgku1ayJgYnAbUtBAaMXSolQtTcy+ZKxoP3o7U0KWM4KW3wRqPL6+IkI9uQsj18PychQ1sUn2otdVEo3p1MJlGHQXHkZrOJaczs1bJeryPYopFNdSocp3TBpl7+ZDIZpJOy5DnHZXes4dEbXOeleI0xBp2Wyq5xY1J9nY3xxsgFzmtJ18yY+WtCMSKyRfP5XMphq76oF5kiMoWphwkgMmB8PmSC4nvCjk+hI/cGnpOAhuPM75bz95u7MBdymATwON/rVSZlFcEznwvDMwSD8iwqTCZTONvXT0gFfpz3vC/pE5PGi3n/TA8XytoDMLlCVRXI8gJ1fURVTfDf/Df/NRaLM/y1v/bXUeQVpC4N+nuK4KrflPv12XeLJfvjIfWGCMIZVjq/OIe1Hd4YARtajQ3w1x3BwCD0loKCUQo6N3j95nVge+dSOoBzDq5Pm/UezotmQ8AKNRMqhFaGoPTRt58wcGPWKf1dOu8l9DMKsz/h/bMXzvgYU/2p8UvPlX5Hut64/lIgzDV3ik2go3gKTKVsh1JqkIHiRr8/ZXDlcz07cIr5GI81rznNIHvKmD8FHtJ5zO9MM0zGx5h5Go/H+HmPX/8zBRv8UqDvUsmHrJQKlQyllK9SEvOE93i4v8Nay0bAGPJisYgFiYwxuL+/j2mtBAE0BjTw3Iy58Cn25EPnplZVFR4eHmK5alLSjDWzwBPpbBpWa230MOklp2wNDTSzI+iB87XxBE4nMe+L7AXHkg88NSbjBZICmhTZey81Hsg4UJBKASCZmFiDw3pUiUHj9xLAkckgCGJ4JfXIySalqZe8JopWOdlTgeNsNhs0ZsvzHA8PD4Nz8X13d3cRqKWpxam3zOdIb5n6DHqi6XUz5biua8zn88iKSR+XDtOJFIHLsxzL5RINNURKs+YfBlUVw5FukHz2aTgjBQvppsPxMsbgxYsXsfU6U7YBYZsI1njuNCbN+XoMxcioCUIwOikTwt4h3stcPT8/h1IK9/f3keGoqirOB67n1MOjUTaBedJaGs6dLc9QVRWMlloYu90W220fviGg0Vqjbmq0bYM8M9GgpyFLimQJosjqcE8URqEvBuacg8n6VN4YMoOHsxZt0+If/eN/jF/91V+TTTzvm/oBgDYaNopgU0+YbEZv2IwxMJlUfW3bBnV9hLMWWV6g9BZ5PsXhsIsCTZOZb1fTCxjYYBlyGbe79+/wcHePs+WFaJysiEJVVM30htx3Gko5KK/itaggOk0NMOfvKZo99fxlbIY1Ksbe9/jvMSju72lYMn18jAFHHJYT36WUGqzxsUFP11zKVoy/Z7wmx+8Lb4rXkq5joM8CG78u4M4OXhtf//g70/M8Ff5IPz8+uA7IpESw9ATAPDXOYzaDR1pDidf7Z5iNIl/Mza8HF72C3GjAW4si9L1YLWaYTUo4Z7GYz6JxZGVQtoWniI8hiLQyZVmWWK/XMYyhtRaDEMSCt7e38N5HUMINezabRaNTVVWs08FNnJtgiia5YVHvQWCQNhxj2IMPh5U204nBjBZ67SklnhbN4oTiNab0O0WD9PyUUoNOrfTOTZFLM2itcKhrFJMKOjMozQTtZoPL6yu0TYNjuKfb29uY3srCaABiyXTG8MkQkZGhoDWtA8HrzvMczjopn91Z7A9HQEkfDuVCRca6gYZCUVbY2z029w+4vLoSVslkOGx3WCyX2G23KPMc3jrYRs6nIUaOY0FKMk0LTeOJTHtNFxB1QGl9E6VEmFjkGVyR4/5+i67tAO+QKYXWWSjvAO/gQxlzk/Xn5eLj3+kmHTd03YunHTDoY2KdQ1YUqKht0RrQGjrMQ9U0kvWRbBZd1+FwkI7Fx+NRhJbc/FMvCULDa1ak1QrTaoLZTNKAmbHF8BnDW1zLzovcxHtEz955j8VsGgG8NgaL+Qzz6RRZnqM+1vCuQ5lnyLIcJjPYbjZYbzbYbjZoDo30LEI+ANCz2SzOv7T+B3U6ZZmjKPIBKCXLVhQ5tDaBQQrh29ygay0ur8+R6TUuzlewXQOtpoDRaK2MqzYKbSfFsax36Jw8Y6Wl4JcDkBU5TJ5BGXGkOttgf9zhUO8Bo6CRodATCX0WFUydAx6oawvnFXIzpLWfMsTp4b0HrJUy5l0LwODzn/8Ev/SLvwBnGyiTAbZBriVc4zsbi3xpFUA5ELQyAShiaFjHBm8wn08Z3+SzmTHwpi+Ln867J8+JVJMz9KhTgJ6OC41meh1jQWm6f6fjewpcpPcsnyEzphIQqpPvDH1FUrBhLTo1FEmOrxnoNSTpa3SsU90T74XzOXUqx+zB2AEg8OPv6ITHa3VDoSfHJw3/p+fWOqSlBxDreI7QbZg8mQuZTk8xJuPjOzVi82GA8ixDfTyCJXLTG55OJ9huN1gsFnh+fY3tdgvv/cALZYrqdruNdDobe5E2J4vx8PAQjex8
PsfDw0PMlnj9+nXUfFhrMZ/PY7aEUiqGWBgioGedeptpq22iuTRmztfSjZFhIxpaALGvCDdEpuTyHKlWYrVa4Xg8JpX1es+Mnvx+v49aEl4rWYeqqmIJbOccDqFWg8kMlJPN5bDfoyhLPDw8YLlcYhqyaarQCC8tIV6GRkm8d1ZQ5TPl32m2Az1x6lO2mw3augdlgFDuUSjpg7cZqGfWTfHe43y1QlEUuL+/j6Gp5WKJu7s7MSjOwcHHUB1ZLJaTHyN6ev6kEQlk02qvk8kkGgHWLClDZcPdbieAwxgYpQEltQs0pLGV9UAfmuiLno1pRaX6Uth8Vl14TWlpMpayP9vdLoLJNMWXoJmiX4IrmxbWM0PNBDekPM+RF9ICfDGbRKDIRnYUG3NeWGtjWJFGJMtzLM/OIlMIIFQQ9vC2Q5YZTCcV6uMBTS1My4sXz1EUBSZViSwzUPDxWTErieB9GkTL7969G1QUZmYSHQ3GpLk3AIihQmYYbdYP6BrRERwOB+Qmw/Nnz7A6X2G3OwDeBPZL+o/UdQ0Xa5DI2nG+z2hIvTgPh7o5om1DCnGWAxmgzTSCJXNrgA7QWREFtE8xCI+OxJNmoMR7AUCvX32J/X6DZa7RdRauaaB8F5ksn8w356QvjsyHbDAnxoAjva4IFNSYw8PgPtI17r10SU7j/Kfo+f4cp0MY47/Ta07PM2Yvxp8ZH2Mmh/+WZ2vAfiHpudOMEaUUWj883ykmIwUC8r5EZJ6AtzRcP2Y7x2OchiFTwWuaIZXef5qFw5/T6yZbzPmcfj7+4bzj665v6ObTcn8j5+arjm8WbBkdrP4Zx973KXlirHTMMGCqaVmWuLy8hDEmGvZ0w6AokYPPrA+mZZ6dnUWmgh1D6c0yJRJANPQELKTq67qOtR244ZOpSMs5S3+WLD5kPoi0ZgEfDCcBNyOCKIpBaQjTUBEN+Xa7Rdu20dASjTLNlGNBEStjkzTC46qkBDD8mcCIFU4BRBaC30WgQ20Gf8/JzEqoBB4pK0PgxNCGMQZv376Nxjb1PCiA5OJIBYgEPvP5HPv9Huv1On4/geHV1dUgXg4gjjdfZ+psKuoleEq/jywUQxsEbkVR4CwYUp4LQAwrmKxf2OMNlXMijRlzsxp7FRw3nXhw3kvK73q9jtcWi2oFQMHvjWmxmw2awGiw/Tf04+XM0ueLxQLnq3Msl4s4fmTGCMa11pEtiUAjrnEf5+lyuRwInYHes2TY8vr6OrKKPFc6L7mW0nlEkEhwwWcMAIvFIs51sm5ArwcC+gw09h+Kht8YTGcz/M7v/A7evHkD5/ygTUET1lvczOIx3Pj5DATEWnSdgJksK1AUFcrwpyonYP+RdA9Jz/PNjmE2gVIqFCtchxRei871LA+/Y8xWcIy5X6Vg+OuM5pihGH8XAX6W5XHNp2stGt3R55V+fP70etKfxwCDcy0V2Y8ZC15fet70vr7JWh2fZ2C0nxi7qGOKr/VPM90XT/07vYf0uaWgLM6M0Rwaj00KTGKo/QSw6EFjfy7npDOuty6WzFeef0QZpXz/OpzHNzm+Y52NPpffKAVnXSLmEo1GYSZRXU8qGwDOz88jaDgej3FA379/j+fPn+NwOGC73Q5KmGst1TjJaHBTId3PjAKGX2i4rLWxsuWHH36Ily9f4sWLF7E3CTcy1rhgTQk+IAKU+/v72MQLkH4um80mhiG4kfLhpQaOwCNt/MYU37ZtsVqt4L2PxbJ4b0zNnE6n2Gw2MfVX6OQyil7TewV6gOK9j6GkNC6+WCywXq+jQQOAm5ubWL2Sxi5lL9KwA4AI8gBEg87x2KzXUB4x9MTNyHsfq7fyNWttFJgyg4cAgPqdxWKBu7u70CTtEIEANzoCMIZ5UlRPYMtnybHgWFFwy3FlTRUW16rrGscg5OUclrBSGyjE0x7ZqU0h9VjSQmoIBvfu9hZd2+L84iLe1xjQMGTS1PWAth7TqtxMCcLYB0bmuwuNvnRkFPiMCXT5bE7dG0E4x1EpFYW3BBCcOxSqbrdbbDYb7Ha7qO3huHO8yTby8xQ5V1WF2WyGd+/ewRiDq6urWJyP84bXQAMiwuINFKSQoNTzsPjxj3+Mf/pP/yl+5Vf+HPKsr1I7nc2k42vbDJgMGmY+r8j6jTzfVNjKDDKulbOzM6zzHN42g+fztewGxsZEMlQOxx1u3r3FL/3gl9A0zEQ5bWjT++CaS+dLChzGTMJXHbz3IbCAdMr2Pmj1ACj2ngmesffBL1ZipHwfAhmDilNrasiMDMXZp1iNMQBJmcfhuSRrhM8yZUHS6zj5lNQwjDE+UsbzFBORMh1jMMfzp9qs9HcpKCKwSJ87x7Z/bh2yTMPaFm1bR9Detg7Wipi1nxfsTovwd//7/ujZm29yfOfeKPQitdawzsYsBUCjPtbIjcLV1VVMw6TokKDBORcpbOdc1DZwo2E4ggN+HUIxrJxJb5hGLh3429tbzGYzGCPFwWazGW5ubrBcLqNRZ90Gpr2enZ1hu93GEAY3orTJGI2zMSYaRxpMejzp38yuYGgHQIyR60Cds04Hr5fCyul0Go0nx4GT6CFk8aQZK/RO034yfI3UOw07xxRADH845yKo4SY6nU5jpgqZCT4fGhsuAGpQptMpbNtnhQC9vofeKzc0fpbsC6t+klrn9fL6tDFobZ/txGfE+5nP51BKxdLx3BDJPo1pVJ4b6DM6FosF2q7FcSdsjA7gQPlesCX31VPW6f2ksVIAj8aBBjvLMjhrJckxLPD9fo9p0C3QSNBg77dbWG5E3oumw/SpsLwObmhpVgfnrQiFfQDDcs1VVUU2kDVvzs7OInBPXbP0XOwVxFRrhtLIjjG0xcqlLNgH9N1TUxCltY7rm5/nXGWqdFmWeP/+vQCE4ISwTglBBp/FdDJFlhnkeYmu7eC1wfv37/H+/XvMpuLoTGczySpxDj5JIeYc0cH7Zv2VaCg8hX80UPydiGOrahLnwcXFJd5VFQ77dmAEvu3hnGg3uq7FZz/7GX7zN38TgIU2GvBDISfw2NDKOdzgtTHwSY12ulbiWUeMRXSygKB3GetB+mtJWTCZ78FTVsPwwikAlP4+XtMToO0pduYU49Kfvx+bVDMyNvzpkYIdYFjIcuhgACLM7ucQ9yzaidSp8MmY0MYSTAwZk8eMG+9hDDbSPWIMpPtxHQK49B4UAK8UvNf9v/l8/DcPo3wnsKGNialvRil4OxSyONtheX4eW7yTsSiKIuoyJpMJ7u/vMZvNsFwuo9CRBiYFFSzGhXCDNEaMN/NvPpDJZIL1eh1L16ZahK7rohaBAILeNA0zPT0OOCcFvejNZhPbf4+pNJ6PWoGiKPDw8BCNMY01gMjCsIDXeLFfXl7i9evX0Vhy8tHwU+xIQEIxLccwLUdNMR+/P70HeqLM/kkrppJt4EG2g2AgLaddVRXaUaEmTnRuNAzZcGy4GJgKnIbjyJ7w/GVZwjdDcV1aGpqLgwCDxsoYE2n4NHRFY07gRrbtYb2G0ioKl7vDQapU2h5Ua0Xvrd/M07BDumB5PalnIlkNGWzQ/Sg
taZucm20rbeHrAJp8WHdxDSYUa8ocjr03ejt9jQgdqttOBpu/tTZWGk09JZNlgLVw4TysXZNSspyvDw8PkdGgI5ICgPQPgSBDcEr1tVPY5ZlrgnN6vV5juVzGNcl5wnvgJirsg8Xh0ADQgBd907Fp8Lu/87v4K3/lfwqFTLQyjYhV27YPV3G++kgP95s6r73rQjEvSIYP2cwsyzGbzcGy6Ofn56iqCY6H7Unv+/GhMGQ1+BnEv1+/eYmmOaCaVFDewHvzyONNgQWvmQDimzAaA2CU/K20jmDXAxEAW++h9DA0yPNyr01DaRoAvBsYPM679Lr4d+pUpHN8HP5JxyBmJY3ACffydC6m85GfT8GE1hrGJYAiuSb+PgV2/Z7Qs7BpuJDfk4Y70jpR/F6CjVMhnrHzNAYg/J18twHQ26i0WznvlwDbORX7ncXz8PmrJAwTfvEVxM/g+NZgQ5BNX+NCFrYfpH1a2w1i6Tc3N3j+/Dk2m00EFPf393FDubu7w3K5jDUyyGzQ8LGMN8Whacyd18GQBxmBPM8xnU4jGKHnM2YCKDZMHyw3b2PMoFAZi0B57xMmp08H4pFSrARCAGL8muI3GgDeBzUs1JO8efMmAgeGXAgsUh0LJ+fl5WXsqpqCtjQjh6CNzck4rmmsnAuVAGS1WsUJRgNNoMHX0thnkfcdXmmgojfv+mJUZIfS5ngcz5SWJptThhg8F1dqALjZs84EQSfnLDUQAGIFWT4Tgqyu66TAmRJKmDQ9tIbJswiQNpuNbLIuhA6zvthT6n2kTA5BG8eI+hOOE9+/3W5jC3oEz2EynWKxWMTPc9y5sRD8MZ08zdhKtS4cM637Z8JrJnOQAp0UIKmwuZdlGQEu15D3Hvf396jrOq61FOjwD59BCgzIiFBT4Jz0BarrGmdnZ9E4sN4KwS61Vgxx8Z57YyGMw+FwQJ6VMQ34x//kx3j58iW+/+kvxQwyWIsiL8Q7txZaB+2PozHsa1M459DUTazXw/Hhenj+/HnQocle8OzZM3z6/U/x9s2Xcf8ge/PNDhGteu8BLbVS7u7f4937G3z00YcwmYZ3vTPC+Q4gjtMpaj4CKj/UEnGudl0HrxS00TA6acgYrgjewY6MDO2BR8/i8XVmyfQXwRokQ2DBuQgg7rHjlPc0VJ1me4zDDPzsmG0cMjkSBui1CyoC+NQYK6Vg2iG7krIDHEOeI4L1sI/StnDctdYDsXNq2NM6Ren+kQIhoK/dQ2eRz5zOxTjdXinp3VMUOZSWjJLCyJrUpgc0zluwRItSw+rFHK90jPndX3d8a7AhDyCUhA2vCZvRC8WyPMMnn36CJsTJqRNgCIEGjN41dRHf+973cHt7C6VUzJBI0SmrS3Jhc5Pqum5QaIvshHMuMhwEFaR1+X5u8qlQkg+cGwNZGf6b8W1uulrrgR6B3nm6aLgREiBwc6cegefnvRJ5EhxxwdEAkpo+Pz8fKPEJZjiJ03shDU4amo2+ZrNZNFTUKvC5ppM/DZ9xLvB+yUSkcft0YaQhE475WCiVxh3JdnDxlGUJBJCXZdmghwoXNRdEylbxWgisOE4EMXwfQRtDVM5JhtLt3Z2ECjITx6FpWuwPfbpYuja4Bvg3nyF/P/bCCMK4LqKh7jpkAaxTYDweK4KCaBy8HxQtoxYlBRyiqRpmqxCs393dRaYn1b7wSF/j2AHCxjB1feC9Jmsg/RydEBoLzmteD+tskEXkRs1nSHBNMS9BdCqQLooCh/0OWVZAa2G0rq6v8er1W7x58wbf//SXolYESqHt2oGxopdHtoKGuOukwV5VTZAZSbdVUIBXMNpgOplhuTiLBs4Yg2fPnkWglBqOIY09nENyEf2P2uiQPi37783NG3zwwTPAdfBuWCWSY5xq27iu0jDBqfmbGicPD4b80/ny1OF8HwJInzcwlN3y5njr6XnT9ZLeE39O30NAO/78qfF8+voFzKXX8dQ1sN4KAEmLDhVkadj5XjoEsiZV3C9j9lgCQlKnKWV205R9njfVZKSf476XZiymQvr4GQyBQ3oefh+/v6kbaKUxmVQoijIwHh7WdhICU4nItvgzrLNhsgw+IE1Ju0tKsgJ4/sELOCsG6OzsLDZTY1YKgNjGnKzFYrHAzc1NNKpsP08jyPDH9fV1DL8AiCmi1ILQ2yJlfnFxgZubm0EmCsVni8UCx+Mxdn4lIEn7r9zc3ODZs2fxOrnhppkwTBkk8MiyLOpPqLdgdg4FcmwV/+LFi5j6e3V1BWttrP5INE+B5nq9jl1pvfcRxJENur+/x7NnzyKwI6jiubiRs5z4dDrFw8ND/Llt23g+hnb4PFLPnROWBpKbNtmjLPWEfF9oLQWGvZfdi7woWKIHmQpLF4sFXr1+jcXZEg8PD/F5vn//PoZ70iqq9DaZCeO9j7H/1AugWJiLM8syWCfnOhwOOFsuYb3kl69jx1tRb4uH16+MU2BiEPtOxo/voYg1DSNogrqmieuMwItjlHo1NLZcW5yHKSigkTaGm/dwYyUjRrCRsh6y6hHrePD5UCux2WxiTxcaUm6CBJYcA64P3geB7t3dXXyGfC+9ptS745imwJxzgfPPWov9bov5fAbvFbQxOB4OuLu7A5TC7/zO7+Cf+82/FPrdIM4RWSP9huychzEipuV48B5MCBmoJMRijGR/SRPELD7fTz7+FJeXl3j79m1cF1EgfOpIX44RDIY5PKyz+Oyzn+JXfvVHMLBAkn7NP5zPKSPz1PelLFT6GWgVs5B8eP4uerUjkAQPZ4Pj6R2cdcJeMOExzZbkv9VpDj4Fq7yvFMTyGvneMWvD19Pz8TNjYMIxTUHScLzTow+blGWFoxanjOLrdP7366T/NJmM9HmkWo702smCcV9Nj5QZTPdPficdYjKJAnBaGM/P9GGoLDMoyyqOT9PUADyM0ijzIur2AI+2k6aHzoXKylkGrYb1Ur7q+PZhFBmNuKiLTBoxdQjdNsPmqY3EQ9+9exf7YUwmEic+Ho9YrVYxTY6Db4yJm9b5+XmMo3MTJhPA9uuHwyGCBL6foIYq9vV6jbOzs6ibYDEkAo6yLGM7e3ZlTetafPrpp7FmB6+NGzg9L7YEJ6VMo6a1jqCDNUL2+z3Oz8+xWq1grY2N55SStDamWtJgdJ30uAB62ozpsEqp2En1eDzi/PwcDw8PmM1m8W8yHARavC8aKIIvxtkBRPEs2R4uvFSHQrBF0NEzP1XMsODC4FhwQyNDxHAC51LqPTIWzwXEOiE20OwsiMYxj8Wtgq7m4uIiAhofGBFuwlz0DPOk3qZzDnmR9ynCWSaVRJu+tXuW58jzDk1nBylw4w0hvf6Ubh1veGk1WepUOB7T+TzOiRRskMXjvaehKqa7knGiZknuzwZ6tN+003AYAQIZq3gvYQPkeqWgl6wGn2Ua5krBQqr74VwikE51PNwj6Fyw6msqUKaGh0XAGKrkvC3LEq4swDb093d3mE4F1Fw/e4Hf+Z3fxb/xV19hdXERwY7zLuoPnGOaOGPpFgxlCFjtYmhz7Jkyk8loFnHLcHZ2hqurK7x79y
4C3RRwnthhk8PHPz1b6fDy1Zd4eLjF+XIBJIwC/6RFBlNDNNYVpSzBaAIDSsELaQNAskzSMt3jw3kFF0Sijn1emOHgeR70hcWSu+V85F7BPXEMLlIAzCMFWON7Se/x1HXLft5rK9JwTeq9y3v7Z9O2LVrdtwng95C54Hmok+CflMlK2dixzoKMLud7+tz4Pu4Z/B0BLPfndE4oJcU3tZaGgWWZRxuSOnZKyVwrsgxVWcW9yXuPwlp4Xw2uMV3XX3d8+zAK+loGaUVD3rDRGnlRYP2wxnK5GGR+bDYbTCYTXF5KI7arq6uoF2CRr/Pzc6zX67i5sKeH0Qab7QbPnj3D+/fvY3VO0qdsR86wANX119fXePnyZaQxjTHSdTJ4cLx2evnPnj3Dw8NDpO7fvn0br5esBzc6sjNkA1KDmsbYUuorLQVOfQknBJEs49ha6xiySdNux5QZgMisEHRdXl5GipqTmuNM4MVQFQFb13VxDK2VipI8Bzcqekw0xhxTalBs16HI8oEXkTIVDCF1XRc9Yy5WxmhT4JHSlHVdIyslW4bhKAoq02JkbdvizZs30fhyE6D3wWeSsiFkgah3IdCq61rSIoNh77oORocmbp2Dj+7aaRU8j37zeSx+47NM75lA6uzsLAK0+XwejT1TtK21MezFcUizl9JwlbAZGGQV0KinKal8bgPxGIBqMokZT2T5CDRSA5ECzdRrTqlixrEBxPWUMjK8xzQcxN+nabZkzMhUvnv3LqyZI7q2wWQykxBf26I9HLBYrnCsG/zJn/wJ/icffhjXOVsPSOMsH8O0Td2hbZvBc5IU4SaAEA0pCgVIN1mpOcGKsXmWoywrvHjxAj/96U9xPNZgJtCp4MJjsIF+LJ2DMhpaKdzf3+Hm5i0uzpZgzxg+87GHnRrDVIOQGkgCYj4PpUPFTP/YYD8JNqChn2AaxkcE3ImBTedf+rnUYA7CAqPvGf/u1HWP1yV/P77OMUB2zqFLWYq2hS98XJucG6kjIeMuGoenmAw6VwTfNOKp48DQfwpk0usfM5HjzBjZGzMo7ZHn2aD+EEPwZBwnYY0bpVEETVW/t7JImKxnATX5k894fHy3MIrRMJlsel3dwFuhXzrboSwlRnp+topaATZV+/jjj2ONh6urK7x9+1aYi2ONrmmlOdu9eLAewPZhg7PVmRgva1FmBd6+eRtrXmwaSdVUkNTR3XYXKWeTGRR5gZubd5hOxaDMZnMohWikJHwj+obtdoMXLz7AmzevURRlNEas90EAw4mwXm8wmVS4v78XGro+YjadwbrAJDQ14GXCtYFFICVFcR03z/v7++gRkRmZz+fw3sdS56nqPoasVC+O3G0l7fTu9g6L5QJGa+zqGqvVOWyYHLPZDNvAxDRNg6oscXlxiZt3N9BKdBz78N35dIq3NzdSHRHDmDsNARcNvU/nnJSrrod6BussjDaoqenw8j7rXOgrkUtnSyvVMCfTCRQUdvsd8izHNAhXp7MZ9ocDjvUR8/kcWuvIENnOoq6PUkLZSX3ttG5EGiPlRkBWJHoUWqOaTHCoBfDtApCNjEcASbbrYit17cRoSEjFRyOjgmcYn9nIs0nHM/WEuMG1tpUsh0Iqf87ULBhZhaZpA3Xp4sKXr/dxgyIgSI2JbO4AjAGMgeta1LbD8RgqkrbULTh0nYK1HQDJMvDOoZqIp7MnM7jbYRcExwibrlIaUH2hIhXADQ2XDiXArfOYTPqMLgqWX79+jevr6zhHuXlzM+RGynsjQJ9MJhFw3N7eBuBVoG66YBA6TCYzvHz5Cs+ev8Dv//7v4a/8lX8Fx5A62+YZlHPQ2qHrWNmRRdUcjHERgNRNC2tdMNAqhpi0ArJMIwntI8ty5GaOqw8+xvL8j7HeHsLvH4ONWJlReUg6q4tvMSpUPNXSCaU91rh5/Ra/8oMfhDEf9qjgnAMQx94lhlAFpoYhD+ssDIZZFd5JfQzvZT1ZKx1ynzItSgHKhVoaXkp8q3D9irerAJ0wH1qH8JooFuUNqmc+vPcSkgEGwDnVePHfKVBJmdixrmMcMmGVzx7oyYWKEZd5YK0dgo0EoKVAnYafjEXbWnRdi67jXl0k5QU8+s7BEpoo8gIqijIVtMphDJBlCmz8J/fF/UMjM2wj4NA2LcpSGAdtjLQq0BraKCDsu5EhrBt0tpPPm75SaWYyONtCA5hUJcqqgjEa3nl0UbMhQDrLU2fmq4/vkI0CZLls3M528jDgYTIlG8i0wmq1RH3s23Wv12usVivc39/HfiebzSaGLaqihILGcXfAfDLHfrPHfD7H6uwcx/0BeVagPh7hOofZZIb62ITCMJLW5iwHuYKzFqYy2B32KAwwmUyxXq+jl+69R1lO4BziJiSbpACI5XKV1ARQaFuLyWSGrnOoKgnRvH8v5bM7Z1FVJeqDiDWL0MEyba/ubAdtNIrAItC40EOiIp8bJxtk0UhSh0Ijst1u40ZLqny/36PMc3RNg0lZYlKE8E5RAs6hMBlgHY67PYzS6LoWZZbjuD9gUlaYT6ZSOj7Q5et7KZx2dX4hzdCKfOAJEXWT6UizGLSW+ZB6uV4peN33k5HXEFkEb2kQWujMoA5Us8lzHJsGXitU0wnqusHZYgHXdbBNC1gH7WVD3x5rzCbi0aOaBDBZAAqxmFSqP0kZGhcMqTzPCvZ4xCboi1hK3FsHow2KLMexs6jKEpNqisPxgMP+ACBoMbhdeR+BpzYa2g5ZLtLpqadFo8rNFhmQVxnmwet3roPrgLreozke4LoWzfEQNx2OYaq7IFCNmy48OgcYDTSdQ9N06LxH6xy88uh8BzgH69jcKgjjlMbZ+TlM6BFi4XFsalgv3raDh9caxkgopWtFSJmZDHlQvDvnkGsDaKr++5LzAGIo7u7uLgJBsp+p8JrrJU3hTmtylGUlBrlzARR0UDCo2w4GGtoDv/8PfxevXn6BTz75FLv9HuJkU38Rn2IwcCaOr1JSqt51FrZroUIpca0NtAZm0wrOdYA3ABSKskCmFlhcfoCrDz/B51++gvUtlGcDtcfhCyUt5AAW9AAAb2HgoUmhZxlu3rxFXTfIypDW7/qQnYAWskSBSUPaudTH31knBZyMNtBZFtNaU0Gk8w7eAd56SYfGULwY32elsJcCoBIb5AMoIBj3zqPzHhps2knWIawJraNGxKHXMZDRSkNCqUA8vR7OeQADTcRYN9GvQwIRuWatpUilUg5ta+GS9BudsChpRgnnMfVT4uSIg869U4ZAbA+fV56bMP+AzGgoWLSNjfNXHFMgMwJKsqxEWQioYMiu7TrYLhN7UxRR2Gk7Csg1FDxs28B5h7LIMDElrO2Q5xkmZSFZZ0pBFxWMUTC5gdcOTgHQYe9Xo5o+f1ZgA0BAai26IAZsmjrGms+WIvpcLRYx9DCfz3F7exszFqi/YAhEK4O2aTGZTLHbSyzeK6BualTTSYipeuRlgc5awEj2i/UOvutiOKcJnrAPCE5lBrv9HtVkAuclHvzm7VtMppOA1gLCYzzYObx9+RJVWUbvLaW6ma653mxiuKLtJ
CNAPO4a3UDRLuldTI00eYa67QtxKa2xO4gA9uLiAm3TYL1Z4+LiEpPpBJv1JorOdrvdQJV/dnYWW67P53O0xzqmDDKUxLAV+8qMq5ReXV1FTQl1LSzo1LYt3r59i9VqheliHoW6Kc3NjB4u2vl8HossjVO1GMJhtgxZojQm34uWsqhnub6+jmXMJ1UVi0lxDNmkizoepk2vVis0XYf9YR+pRdKRLFWd6jiatsV0NsN6s5GOsOH6GS9NjeJkMhXqPPTdsKGroxYufVDGn2OVmWEthHFMmBtRURRwcJgWU2gj2TF58Oi7to39e6xj75IO3inkVSrmkvNPJpNIgUZWJXi8x/0ebZgrbdcF48IaFUmlS2hY57BcLnF+fh6ZPZbb53fxhsVo62ioGKIC0IuVrUNWSKMyGsI0xsxMK7KU/B2ZDeqebm5uBqnDZEKMyeB8qGacZ2J3Q2GlaTXB8Shzeb/boz4e4eFR5CVa20UxujgECM8kw7DCZPOoZ5L3LojyREzH8NpkWmGq51guV/jgw4/wu0qa25ng2T+mCfjiEIRE3U9gJIw2WK+lxP3ZxWowt9JwJz+bhhRSgEujAQzr16Tn45GGx9JjHB5EEh4ZMwoKwxb3aYZFD/YeZ52khenSfSW9xzRsNw73pJ8Zhzt4bSmrAwyLVxJ4xzF2ouXh59J9MdXlcN6mafC8HmqTUkZEgdo8FUPYZEBMliHLTdQGCXuEZH0YOCfjNO5aLmMs2iNlDIwpQp0lqTGTZTmqqgCdC16nDixlZrIgLA3hHy3gNMuG5e+/6vjOLeZppKtpGYV5VVVhvlhAa403b9/ggw8+xP3dHd69e4fFcon7+zucr86xPxwwBTAJQMIqD5NnWO+FKl8/PKCaTdE4i+NWQiVNfYRqLRZnZ9jutmi7VjZFKVsHk2U4HI5oraDAoiyhbIfdYY9uKxU8D4FRmRymcFYo0APFddoIDdW2KItC6EKlYNsuAgSEDay1He7XD4AWQMRQzm6/E2MdcvSVUoHiKpAZAyiFObNoguYhKwp0zuF9yNhZrFY4NjU2N2vMQ/Gy29vbSHHRU727uxtk9uQmw+F4xGw+C9Rvh+l8hrppUE0maNoGSsvm21mLsijw5u1bnJ+fA3WN7XaDq+tr3Ny8RVmWEsrywHr9EFkJLuQ07sjJTHBAcS0UgwtCizrvsNvvMZ/PsNvvUZYCVExmhMrLc5SlZLVkSjrdTqZT1AHITkKIZ7VcBk9UPDK2ij8cj5hMKjRNi2oywWa7RTWdBCarT1dmnJ8HN9vOWXQh5Mfsl8jUuL4AHBmormvhgzrdaBMBxyDe7If6jFNZAgDiOMY01SyHU1I+e7fbomb3YpOhTQSkh/0e3jlkxSR6bvx+Ao3U6JBRaq3oNLq2jS3Hwev2PhoLpaRgXxb6xhyPx1iHZb/fPxI70mjwXsbxcOp7KGDL8iyAsGHxJFLF7G3CP9RpzWYzvHnzJqZv8xlyUxWK3Yl+whhJG1WiQ9jvd3h2/Qz7/RF/7+/9Pbx48QJ5iEt3ri+u5EK4QMZPNmA6J3Wo5cLrpUcsazQY5DDmRVFgXs1RTSa4vr6Wfj/7Fq6Tgojf9Eg1FZwzDw8PUhn5/Czqczg3xhqOFHCMjWrKWqYp62NtQ/rzIxCh1Mn3pOcaz9H0SN87/j4+Uxpy3sMwFNJf+ziFmeNw6rqB3linYIt/DwWj/fXaTrq+cs2m45yWZSAwSHUS3EMY9kz308yQ4VDRmZNsmQJl1YfS8zwHQngrTWdO9xQe8dx5Fue20UZYtyxDWRWBXQl1cBAYKN9nZhEwca9KmY0/U7ARkV/wQrNwY1ILATgcj7i8usLD+gHbvWShbHdbLM/OsN4K7VlNJ2i7DtqY0OvFYbKY41jXWKzOcHMnBjbPMrx/uEfXtijKEs39XWQTEDZHhkPqum9Rr/Y7tMFzffX6Na6uroSerSZQRkoX50UuYYksQ5bnURjqnUeWh4qkIYXLWgutVKQQxXBZlFWB/VEqTOZFgfuHB/F60HtCHCOtNDbbTSy17jqgPUhopSxLNF2Hw/0dJlWFyWwatRFU7FOQCUhlQqb7eu8BrVBUJd6FfjLOuZihYq2FNgZ12+D6+TO8fv0ah+MR0/kM6wDmliE7aBpKyR9Dtg9FuFQej3P2uXiZIXMInmLvVfhYLGk2l/LQRVngGBgOaj6yLMM6dOV13iEvRRMwmVTQmcH+sMdsMcd6u0U5qSKzsgvjZ4zBPtR0adsWk9k0pkEzQ4WLhAJkLpKu6wDdF6ej187iX7PZDHd3d3GRG5PBOuB4bOIiJ/U7kM6pfiOOnukJijf1fJxz0OjZDqV1ZHPm01nMpjpsd+isRTWZICsmvYEMRm82mz3y5uRp4JG6HZBKkM4NiyIBQtcz5ZUgjWHGFEj0xuvpughMec2yDHXTIDM6ZFf2QINhOQpXU8+Wm29d15jP5zHri8+Q4DDLMsCHomuBOlYQ3UBZFliv16iqCn/4h/8ENzc3+OT734+hK+eGzcLgVWC2TNT9tElBQz5oerDJJhl/nEyk9XxVVlgsl2iOm+E8+Q6Hcw5t1+Ht27f4/i/9wmC8OY/4vrGhHXv1p4DG+Pmlzzg93/DZD8Wc6ZG+nq6D9HepkR+DjZQJOMV+0AlKPzsGD+Nzp8zD+PccNxrTLMuQVjFjVk46Jum4kB1jiJkMBg9qxmgfUpsqYVCDySQP3wFkWWDYvIt7SRaAbnrtvOaULSHoUbpnhpgOn4aXGGZWKmhooMK872tzpEBVwEYWAfbXHd8JbEREpns6lTS60gqTcoa3b94hL3IsFyIUzYzGbnfA82cvcHPzDpv1Dk1TY3c4YHm+wl2oKHrz9gZVVcIDONQi5BRPdYP9m9eYLxZoui7Sn1LbQ6FuaqlPYGXTtM6ha/qUyjdv30aK9P7hHh6iKM7zHIfwc1oO+tjUEh/lwuWEDZPGB6+cjbp0CBHZzImwLniH1lo0gRIushxtYFsewoaXF3mg8ZvY6yKzGbQHtFexWBpFf+yEutvt8PHHH8cNl5vws2fP8ObNGywWCyyXy0j3brdbzOdz/Mmf/AnOz8+j1mM6ncZqmimwoKEG8AjJphkeKau12+1QBKOe0uIEBgxDsROuFKXqMJ1MUddHnC1XoDBLqwxlMUFnO+x3e1xdXePNmzdYnS0GMXz2vTHG4Pz8HIfDIVZo5UKfz+exuNzhcIhjxHFt2xZFlsMDsb09r5tlzq3ty3kLmDQhXKDFKDlEcBCP6BFJ2hk9mv7XvdcVMzHg4VsPr/sNm+G87XaL4/GI/W4H5YHVaiVhhLqLm0waOkkPfkfnLI5NHb+fm2bHomIJ9SweZJ/mzXASU4bHm7msl9FrI2qe9VtUKLSl4AdzSxyHNs7n1WoV/wb6VvLUO/FavPex3PxmsxZNzaREpiWdj/H3rutweXUN2zl88cXnohUwfREzoDeIEjoRbQnngPc+0hapkY3UttKD/YIs02IxDyHPGd68DC0A3DcrhsTzc06mWW9v376Jxc5o5Hit6XM4dXwdm5A+N5XM5fFxigFJjXrq9ac1
V1JGJX3vGMhordGGQpI8B/ekNAw5BhFj0DK+5pSBG18/P5+Cm5Qt4MHrJbhLnxPDObQ71FKlGVVpZgrfSwDEUKTW4uQUZZ9xpzUrmPhHz4XMDwEFv8P5HqyRZeEfoA8byfmk505/HSoZG4YlCUIej8up4ztVEKXxSNNHOdBt26LIC1ShUVLTtjgea8znMyht8OWr18hMhn0wmF1ncffZZ5hMp/jyyy+leNVmPVAZ26Aub9sW9+s1fCgwAgCubQEtFGlZlpJf3ok4LS8KZDpHFii46XQaGRIaSKbOqVAILHrqdQ0VHkoqJOLDsl2HztvY00JDBI3OWihj4AObwInhnROdSxCRZlkWwwu8T/6xXYcqL9Ae6kHX20XQwVxeXqJpGnzxxRdYLpf48ssv8ez5MxRFgVevXyMLIIjPq2kaFHmOtzdvMZ8vsN3tsA0MU33Pbq4eN+/eYT6fwXmHoiyDXr4vxcyUynTRsTz4dDqVbrbbDZzzsTAVmZAm1NbY7fehg6vUKehai7aTZ7HdC5Oy3e8xqUSzUlYlprM53r2/xdn5CvvdJhR7O2CxmOPLL79EURbQIdslz3Nsd1uYzOBwOOJsuYT3ffofS6Rz/qaejbXyPJVSkdVYLpex+ReNeVVNsNsd0LQ2AoG6qdF23Yloe59GN1bPp/QnDZlSCk4JkDXawLYyzw+HA9q6BqBQViLqZbrooe7z/ePG4voUYl5D1wnQdfCRFSRrEa8t2Wx5jvOLiwjUAMQQwmnPdqj25yZNA8lmh1Jwy8f1x/Aga7eQRSJTyWfI8VNKRX0QNR58xlppGO3hOtF8hQFHWRa4f9jg7u4Ws9kc+/0Rv//7/wi/+IMfwrbDXi9jdmbwsx9647x/Mfh9yW4+E2Ga5iirEkVRIi9y2bdwQrKRfBfPfcoYeu+hjcbt7e0AbHDMCTo49vxdyjo8ZqUeg4R0rvJcBGZpKHB8neOxS5mT1Ds+Fd4Ygx7vPeD6bK0U4KX3lT6LFITE8RoxKinYfwScdd/P6dT9PMX6pCCcoC9N/eb6JBPD38ewo7WDGkN8vzE6sgwR2AYBcco40KFJWY0I5DUerUn+zPnbz3EyG8M+S2Q307DKNz2+A7MhIjBSigYJ0goXf3d/j9zkUQTqnMPxTkIhHNy2lZ4nh8MBWZHhPjRx6lgEKCAv0qP0uDwAKKCoKompliWyZJM6X61kEMJgZEo/mpxFUWAfjB5j8dZKhsEhlK3m+ej5RkOQ0GXOW3iFAYXNnGkTQEjXtgKMtI4hH++lHPVutxODB4U2aCoUBDWWJkNVlJFCXywWUfh5c3MTJ8f79+8BAG/evhVDG5gmTmyyFv3kvYnPi9lCXddFFuN9KBdfVZUUk2ohXn+49nGTLXaX3QcdReekeZcNY0JR5maziQuAC3J/OKDIKzgn4t1qMsHNOwkDvb15J1kGQaPgnMP9+h6TskDdvIFSCvcP92jbNmo4OAdjbNg6HEMX3f1+Hz1zFoPj/JIxsDB5FsNkLCJ2e3srwsLQ2fTm5gabzRZQojBn6d6+aRfi9XKxnvLATm10NO7wQOcliwlAjAErqBiCm5RVVMRnJkOnbWzsd3FxEcv0U1DJ6qBt20r2SJiDKQF6ygnMsiy2cecGnHpzfL03CsPNlq9Tb8RxqGvpKKuVH9TRYONA1ubx3uP8/BxKSfiLgmCyeb2IDtFjM0ZjUuV4fnWOyXSG7U7KljunMZ2WaBrJ7NFG44//5I8EABVVYlApXnToWgtr/eA7e2MrjElKK4tWZFjngNdVFKLdmk2nuHu/Qab1Y7ShJBslXbNjcJCGcLbbbexsPTbWvKZxdcuxUUyPNITA7x7T8SlrMp7P43+PjX8KTsbnSK97/B6TOGPch08BQs4DajzSMTzFTowB0nhOD/QtSeyL15N+5/i7TjE+aRVRzvkUHFRlGY14nrPSboYs0zG7h/OJ4UF+Nh2bsb5HUmETPZFSsn9oHe7FIC/y4OxKh2F4DJ5dek8EG3JPf4blyoFQMMhkgHMRxdd1HepoeNSqwXqziVkUdV2jaZuQR6zipqK1hray8TAtKwuLnoidtH2e5yhD6CEPHiW9IpbW5r/JJsB5tIGasp2VjWg6xXKxiN4tvNCzk+kEV1dXuLy8FG9WKRSBLcnzHJmRrIXD/iC9FFyHopSN0lkXBaab9Qaz+SyWZ2fr9uYYOozWdYhZG6knst1ishD9QttIpgFrRRCYxaZR6CuIZlmG/WEvlJdCvB++l5OZEzAtbMbzcPOkl596kQQcuZGMovv7+wgSGVtv2zY+Yx8S9lorGQ7ptfNZpzoF2QCBSTXFbdBEzGazAdDjYa1F19TIjEIemqKlFCfvM91wjRYVNd/rvRehLBQ++OCDKHpUSkldlqKImUnccOfzOYBep6C1xnw+w3Z7hLV9b4Esy6C8ltLmkSEYFrI6tZmm4QMglHtGiPE6hUybWOlPK40iGOC6ruGCp1GUJdrAPFHvkOd5LJB3e3uL7Xbbe9+SEgAXNpx+o+pZmaj/KItBOXr+nRqtwQYbNjlumPSIUpApY0Eg6Aft5GkgYr2RMF8JlJbLJe7v7+N+kNabIUtSVSWur5aYVhk+/OAZnj9/gd/9vX+E7e6APMvgvayHalLhD//wD/H69St89Mn3A3iQZ8laBpKuPYnX432faZKmhuqR8UkPDzFOItLLkeeyZ5xEd9/ycM7jpz/9KT766KO4dknTj+fXKS+c99RrrJ5irHoGYQxG0nnwFAvD86W/G7MovPYUrABPsz8pGOL5U0CSfvdTzya9Fr5vbLzT7+Khk/1pvBbSI/1dep9pgS46QbRZZC4I0MtSAIfSw+dUlmUMz6bPazKZDK5d7ofrEeAq9557kAios5CKbjTvQUVnSq5bwVrapBrOHSKr+Au/8OTwxuM7ZqOEDSQzgBVdgbUWDw8PuL+/F+PnFeqmES2EZgpRi7puYLSoxeWGPbwVQWaV58hMBpPJg5xcXqGqSiAYE60FKDjnImXedS3yLMOkrOCchXNS1102ZcC1HWDE0GbGoAgpQfWxhlLAtJpgwm6iwRjWof5EURQ4Hg7QUKiKErbrQr0FjWIyRVEWgBLaSmUyAeu6xvnZSgBBXsBog+uLSzRNC2ulumYeUgDpsbZti6osQ9Edh0k1EXrb9hNzXMaa1RtpDPfHA0xgdLSR7Ig8Cw3o8hz73U5aaYd/H45HILA4Simst1sJHameAs4CBTwpesEeWQIuRmo2oueiAAcvQtFkMxrTn/K6AZkyWajA8VgHXYMfLGAx7A6Z9vB7qdypMDR2RVHIvQdvQXlJEaQBkRBIFTcTbpxa3FMBREEHcH5+jtvbW1xfX0eRLg020Ke+xfBDsNRjD60PjfX0fzoGXAM8nHNpCwb5PK+16ylZ6D5NzivZeK6vr+G9x8PDQxRE393dYbvd9t6oAnQW6Nuug0OSThj3GAmxmCzD2fIsNlBMr2m8iQKAQp82yzFPPUWyfoDoJ5quA7yDMXaQhszQA+c+u8mSiSSITqs2DjZWrfHu7Vvkz84Bb/HpJx/jX/yX/xX8H/8
P/ycc11tY69F2DmeTKe7ubvH+/S0+/v4vomv7qrwsV65D+EVrE1kUKXQ19PT6Z32ip4b3vR4ry1BWkplg28fN/L7tYYzG69evB+wknQmCDZ0YpHG4gXNubEzHjAOfReqQpPOBrz/1+VNe/ql/872PxveJ96f3eYpReIr9GB/pPE2BxhgUpfc81nukgGbM4qTMx6nx5/fb4KR1XYu2nYb3O1ibIS/6rDWGWtLv5fzi3xwTCjnlcjSXd7h2ccqgQsaWIpvaom2bKCYnA8Owefpz0zT4rd/6V79yfIF/BoFo27bi6Qev2FqblC42UfskqCjkKSuhCPO8wHI5CUI8h8V8hrLIh8ViPNAFoWXXWWgolHmBqpBMBqM1ppMJnC1QlGVUqmstpY+Zf2/rBvWh77IKALZpcQghjLIsRdnfdbGSJ9uyv3n5CtvtFh9++CF8aXHz5i3W63WIOc+h/BSb3RYP9/coyhLnqxWgFB7u7qUYVqBO57MX2G62aI5HbLqu759gDFxn0dY1lrN5TE3yQbA3m/YN0abTKbquw2q1wna7hdYal5eXOBwOQo9pqYdAZoNeOfUJ6/U6ijIJDPlvjluaOpjS/hmGnTpnISXXWun5wrTXLMswmU1x+/AA5/v+DNQrpKWSZYMDFEz0XpXq9QA8f7qg8zyH0QJadQAw3CiNMTEDIC8KlFUF7T0m1SQuTKafUaTKawOCobRSzZTFoj744IOYYsnnTuAjYZi6z5gI6ZrWOZnnJ6jNlFJON8Z0kxKwF9JkM6E2cyNzVyesoup1inAwWC7P4nmqqsL79+/x7t07HEIYicbeK0AjMClhc2NozPPaA+DMsgyLkMpOdoFGPlW9Jxcfn0XqbdJ7I1sm4bUa3jt4Z+Fcz3aR0UoLOBHopPqgLMtiXRWCXY5x27XoDnv88R89YDadYL2+x7/5F34D/9q/9j/D//n/8u8jy0tYK0JUKOCPf/on+Mv/wr+Ipu3Bi6wJJdVTw35H0EPBt0oMyNijHrMWfN0Y2bcm0wm2D38aYMNgs9nEzDTOqVOhjjGDweMU85C+Pv5c6sGn9z0WpaahjqeOMXNw6lqUUiG8/Pj6xozCmKVIr+ep7+cc5Wd4vjS8+Wh8Rt8zBjqpTmPMeIyBV8p6muDAdSHjsm3bkI2YYTqrYhq/nLfvr5KyTmm5856pyWG0CJgR7pckgTAVLIfuwnfXqJtjaGUgwKNp2lBTS1L/5XMduu7PqMV8GGd0nXjOWpu+LG6skihV+OQmBS3leYaqEoAh9derOEhFlkEjSb9SSqqghU2HIQMP8cwWi2UwElJDobMWm/Ua9VFQVnM8RurXNh32wbNj07Ku67DZbAAAz549k1LMZYksxMystZLt4hxm0yngPd6/ewfbdThbLoWmMgZ1fYDyHov5PMTQgPp4xO27d8I+NA2W8zk2Dw/YrTfYhYyQSVlBeWC73kRauj4e0TY6llLvug7lR2Ic9/t9bKn+5s2b2C/j4eFBPL3M4PzyMlLRAJBnGWbTKXb7fUzVowiPGhTvpSJhNZ3CWYsiz6MHViYsgXI+NppLU0apI6EhY3Mqxs3hPfb7AzpIpVmj5XkzKm1Mhq5zA9ZDqPEuerFSe1/mXJ4btM0Rk6qKQk6tVKQ0GX6LbIo2eBZqGzDUQ1DKeaC1pJpZ7yR1NfRu0VpHXQ+9blKU1krlzTzPUNdMTQibmnNAYCNS+voUhZ2CjbgZKanbkYV4rVHiiWQmE4CjpFy5a0MpcaVh8jKm0e33e+z3e+x2uwgICCQBoCxKIHR9hHNoU6GfDwhGKSjflz5no7P9fh8Nbuo1pwdZrNS7FiBAASyk0BaAPDOwYVNNQReFavxudualqJuASBTyTNUTYiYzOtS0mWByVuD9zRt0bY1/8uM/wA9/+Yco8gxeSR2D4+GAajLF7//e7+Pf/t/YECrru79mRkIe0ia8jUDLxjotvRx47FEnIyL3DYQOvhplWaEqJ9jg4emaXt/k8L3jd3Nzgw8//PCRaDM1dqdCHPyZxjV975h5G/+dPvPxPDj1ntSTH8/71Lk59bnxkDwVmknDMal2IxUWj4/0OtJ7HgOB9MGkqa9jfQfvkecbn4dz+9H1J8+O4W0JmReoqhJtN4ll/MuyRJHnmIVCftJl2EcRe5aFPRjEvQp13QKQtSjMRAAVdY1jsJ0EFV3XoG3l9TYU8OzLpbeD+eH802AuPb5jnQ2DPJcWzM556XDspWSxCv7XZFrGuNOkqrBarST7I+QeH/Z7aCMbS308YLlYIM/z2DGW04vGuLMiHmu7DlohVtWUssYInhKw36xhlEdbH+E9UB+OqKoSWSab33QqdRfOzhaYTISmKoIwZjqt0HUWh8Meq9UKs9kE0+ksTirJCJlHNsBlOZaLJdq2QVVNsH54QJHl+OiDD/tOlZmB7SzOz89xuTqXQc8yVGWJWfj+qipR1yKkvDhb4dbdIg/UNTd30v/e+yi6ZF8I76Xvy2G3x+Gwx4ysiZXCQc5J+anjbo+66bu7Hg9HyfYIYaRXL1/BefHai9UKznvst7uYsZBqQsierFar2JyL1zYpS1jnMJtIc7vdbofcZDAqFJzRGkrlcN6HfG6EmgYyv/JMwkxlkVCFRqFrG2RaIzdGyupqHRvKGWMi8MjzHHmWo22buBkbY6LRJNiqKultY60FWFo8hKjY0ZTgbDKZwDmHN2/eYLlcog4AzqOD8/JH+pV4AFKbhd6YbEQSLxXywIeNQMfXAvGH3IRNQgHoPBor95DnOSaVF7YjL5BVU2SZQVO3MfrRddIhmFkj3BC40RZFgSLPUTfHwOw4KOWhnJUMLi+OApw0FZtO5siyDJvtNhqxNGsm1TEAgLUS4tBGAXDobAdjMhRlBud02C+kZoAxGpkuY5Eyskxj75CUsLUWymRorIU7yiY4n82QKY+7d+8wnRQwxiNHi4v5AsYDBg280viHf/+/xYvrZ3j+wfdQ5Rlgcmz3NbJMWIt3b29wd/se2mTIMoPtdg/vAGNy1LWEgQHAOWnVrTTrG/jkj0VRGHhYCbNw4npAeY+yyIL2q4DSGSbTBfLiAceDZOcoeHhnYUw/5x/vu489e9c56MzgzatXqH/5RxFYRs1MCG1554UNU0OxfM8yDrUTY1biqVCC9326LZSHzjS0Twws2PHFxZ+gZT2LiF6ujfoFbRKjrQDvHZz1cS35J8AHgCiW9G7IRDzF3DxioXyvL9Jax9IH0OI4mVSjoxDfE/U6wSviuBrda9TU6Dp0Ms9T5kl0QDKnxK4dYW0HG4pVHssG1aTBJLSQF62cwny+wGRSBRZXobMdjk2DLvZmcTge2sgU0oY0bYsmVhJuYTspG9E2R3S2TjQakr7b1A2UZsfX4Ag+yr87fXwngWieF1AADvs99vsDnEevD8gyzOdTXFyughcYiojkGiZTaLsOx2MHpYHlUspbT6dCK3rvcTj2GyWbtjknNSGurq6g4KG9wk/+6I9jme6zszOUmXjts5DOSvHixdmZVF8MtCtTOOmd1XUDKfPqYqv46+trvHv3LjZhe/78Od6+fY
vpdBK7m+73e5ydnaFtWyzmC7x69SqelxTwZDKJoaXpdArbtNGIpbVJJA7sUITJc3V5he1+F8MVLEPOniksOLXdbnF1dSUKfa1xtlzi+bNnePXqlTTJ0RpOG6gcwlpojTzLsD8cUBYFlvOFCEfLCpvNBh9+8EHMZPHOw4ZwxSwpLz6ZTOI1p1VFl8slVqtVDNMwhDMpSvgrH2uDsG/CbrcTUWam0VoXYvdaGLPYy6MLxljamWuVoTB9FdWiKGJdiXSDzEwm97nbAV7SRhkKiPUsfF94J9l94txzzkV2hTVEFosFvve97+Hh4QFZpuEh12wyBWOlr4GNNKVF26lQBlwM7ZBK1UiF8f3GE4CHA6x3UdtklcNhf0RxdoZJWYRr98hDETKKfVnNlZsXDUGvHG8CuJCNp7M21nLh98OLCLJpGrx//z6KfSn45ZxMw0MyvqSuHbQxKEKfFOf8oP+PjIMDoGML6/Q6x/S1CULqcjKFbKRW2g2EKoqLxQLeNZhkGZRr8dGzS2jUKDKF+4cHVGWBn3/2U5STKS7Pz7A7dmhaJ6wRgM1mjd1ui4urK3SttFJoatszuDk9RAfvWaVSwixQHsYAWZ4jy0WDlFpCrQzKPEfbaHRtC6MNlMpgshJFORFgqBDZPhmXp9MJH3nmAdS+u3mHTQiVaho9F/qKuKe1B6dCDOl7xwY7/TnN9oASAKEUhCEMDKZ3LgFeDHsM02qVVjDKAEnoPX6voDV4L/cU734MHMJrCoAPhn8cnlQjA5/eT/p5HYBC/FzwBAahnSRnKN17IqDg+U4wXqfEqoJpVAI2el2F921kIw6HA/JdHsuR180cMl80mkbCGeziTD2FOKYd2tYO9j2GttNu2/J9oo/z6JMLyOopI85ClhloE/Qm/puVqPv2jdjCQAN+MLDe9ypt0T0I9QOU0TPLskzKY2MYz9tsNjGGztg4jUmeS+nq2WyGh4cH/OL3v4/j/oDvf//7UEpFIRlp1uvr60FzJsbnKbKkgZzNZlHHcHd3h6urK+x2O1xeXsaCUGkPBlaWZKGs1WoV27M/PDzEwlHL5XKQ3cG6Dof9HqvlWeziCiBem1IqquzZvZKTZjqdRjo/7TWz2+3i5l1VFRgr3263uLi4GHRiZfyQMXOGFQhavPcxhZV1PRCeaZnoYRjDp0AVQCxyxb4XpOuZOnp+fi5N4UJmC2uzRJClgGOY7CLELKOnQEahDTU6FADtEcEGPeHoFSU0MDMyWHuFCm+ek/OBXrOjYXbDFs0ptb/dbmODsCzL4LxHlhnkNkPXdtBaDBS3MS5SpRR8spFybFNamtctrJmEH9NNzDkXWQuOD7Mx0o0zZSA4PsO0TRt6KvRN2+I1ysWIFkEpKSAWOuDyXtJeKwASDUbf/+Ty8jJeL8eSn+U1AhJGK8JrLFE+ptN5/zmLrxUFvLPIcwEpm/UWRaZRHxtUpoSzDsuzM0wrhTLXOL+4wnZ/xM8++wwwJZQ26KwwDF0nxdzqpsbt7S0++Ogj1MctyqJEfdwN6HgBixxlXl/PEpDF7eP3/fN23uOwP4b28iJ+tVyH2w2866AzDal66gcNzL7+EGP28CB9UqbT6aBOw7hWBDtAp3MxnYN8/RQLMNYj8HPhKgbvOR1SwsnfPcWy8L1aGylh8BXneHStie4s1Q+l1zz+HoLmNPySHgOAgsdsz/je0/Olr58KPVIsr9Tj6+N6TkNEtAl1XaNru1gigfsa/xAkiLbCBufNPbr28bgDDkr3AIr7yDh7jnvUNzm+fVEveDRNXwxJRHuyoRhtcHV1hWfPrlCUJsaMuUHt9/tBQRgW5enqBgjMwjyk73ETs84JwGAsPVRt/PDDD2McnuEUGgJjjHjOpi9Ewz8SzxJKk4zBZdA7kDKvqipS7/z34XAIxaT2sfjQbDaTrrVVFXs0pKp9MjIAUJQl1ut19DC5OTsnTa6YK//+/Xu8ePECh/qILM/x7t07nJ2dRQN9OBxizQqlFC4vLyNIoa6DsW0KNFODewx6FhYrY2jBOReLRNFw0/ilmwDZAT6PPic8jynCBCZcbHz+BGL8XdM0KKsK+8MeTaixYl0faiGYa7sOVQAbu+1uML6sJJk2auLCJNCjXoU1X8iESKZBKAplu8HmTP0LG7txTlprcXV1hYeHBzR1AzgfPLawuXgAOogHvYIL7IRWw46UvH5uENzsZG1YONdvwATSSqmBhiGtE5MW8OH3KKWiqDcNfzCbIop2vbgJKtDGaQ59nvep5cwS4VwgECUISNdtnuex0R+fY1p5VinR1ACy+bLAHsNzaaxbNrosfj/DA1Lk7g5tCxidwQFQ2mC92+PXf+0v4uc//wlWF+c4djd4f3OL27t7tF0XQjkZtvsjihCa+vnnn+PXf+M3wvx2US/EudW2IYbvQ4dUY6C1gvO9uPVU7B4Q47zZbnA4HEMH6ALWSqGvTfWA42EHhNLQnbXfWLIhG7IYCHbLff78+Ul6vn97z1ikAIO/O/X6+EjnGA+GNhiqkVHq/X+FPnSgtIr7uNLynt5DVpKajV4UqrRCbrKTGSmnDB3BRnov/JPe4zgLJxW7pkaU/z7FAo2vJ72G9JzpQSBzCqiMgcb4efGe0/XLPZ3voQMxFgkbkwfgIsxF+GYMiIOUEfJ9iCQdl/F+9U2Pbx9G8bJ48lyyRyRmhBhDurq6wtnZGbquxm63w2F/iDcynU6wC5kfZVlgPptjPp+jMCZmTVAMqoJnxRsnkwAgFluiEZ3P59H473ZijLIsk9h6yJRhyhzQGxCCHj6g6XQaGQGeg8WFGMJIa3qQGWA3VT4MMhY0BKSJCQTyPMdsNosCWZYZv7m5wQcffIC3b99iEbqwnp+f4+7uDhcXF5Eao2FhXROyIan6mBOCzEgqLKJRSgWl9F45FkqpqKHhRAb6iU5GgMwS9QJAPz845hSWMlSltZaeNHkuBWtamUfn5+d4eHgYMh/exxRcY4wYeAw9rRQgpeAWQHz+HHuq93ntKeujvIdNUr34fo712dlZZI9S6jH1vnWIuXfOoutcsAWPxWfUI4xDEfI+UvaPmywRQB/3ezT/X9r+/Mu27SoPBL+1dnf6JtrbvlbSk0A0AoQQBiwwGAxSImxjyl2WC6cza9Soqv+n8hdn/VB2lrELG7dlmc5ZtgFDGgNCTwjpId333r1xozn9Obtdq35Y+1t7nh0n7mvG8B4jbsSNOGefvddea81vfvObc6YpwjoMIVPdOAdkKIWMntauuSA1P7amiFF/b4s0O/2eHwPO4U6twQJciKrb7fo+N7K0OecQ11NbG5CXzrC6dtixB4Fy3TeAywGhsiywWK4QRU2NHKPg0rqrCpEO8LU//QZ+5If/HKKkh9UmxWaXY7ZYwQbPUBm47st5Wcevc6gwxJ9+7WuuuWHchTENsGoYpPL2Rk0JQg385PrjYayBVroW3VWIow6KMEOmQnS7PfS6fWTpzj1brbyG6f0ecv48e/YMH/3oR/3fOAe4zxm7H+LhIQ2N/C7vQxrfu8IAkt1QdWjCAwx1W0gpAcDedaC5Dp9N0boWv
u/Qtch7OPR7CeKttY1moz5k7RZ5rrsAhQQjhz73EOPB88n31VgfDTfaHIeADm2Hr1elm6wgWS+p7eDIHkiHWBb3ecYD6UPXcteYvOj44GBDwVOBpqZsmLpGVfRms0Jcxy/p6Xe7XURhhOlkgtFo5Nujd7tdTOoQAr1oaQDokZJtGE4HWK/W3njS2DGUQE9Va+3rJKxWK181ku3uGUoYDoc+TEPgst1ufWEUGmEaYAoy+XBpYGWBF7IWBBl+6Kz1DAAFpEwd3Ww2/ppOT08xW8y92Is9H3a7HY6Ojnydi8Fg4NkKggqGo6paiCszKYIa1NEAaa19DQkKh7jJ0ogSVNCQK9UU0JKxdRpBWVuBE5KCJElPkolK88wbft4rx4yeM99DvY0cU+mdtDdEgkPWaqBnzbFgiqtSzptiWEGW/KbhY6v7KIown8/3qGprnNA1igIM+q53z3a7RVa5yrcyRVJ+JwNAD0SGKNobuxzTTqeDfr/v53wcxz4djpVCOUdlG3hrDYqiRCXCJ4eoZG6AWmvkWV6LUt21dbtd3L9/3zMR1OuMx2Os1+u6zXrkmTQCTYIbzhUAKLIMTbGhBkwRPMkNn7/PirLOyHH9daI4hlYKZeb65XS6HSznN/jSv/t1vPbay9jscnzzyTvIigp2vkReVMiKCovFCmHkmCFlLb71rW/i7bffxqOHL6EsG2ZPgjhbN8EylYEx8M/V7YfRrY0dqDMMau2HgkKnk8CUXRRZiqrMEScuHdGaCrDO0zfm/W/guu7rorXG1dXVXnp528hYawBRUfkuVsOfW4Dhtqe9d49+TTW/IyvBeSSBQ8PgNeNsW+tYzkkGUORnt4GA/B3uMObyPRJwULPUZjDaYygPJf7+XszHixgkuX9JsCHPzS+um3ZojPv9i8Ag93xfFwgNQ+Xe14QJlYITjQG3n4XYyw4xZy86PlQ2Co0T41Zau0Y59PQ7SYLJeAhjnFAwCkMfvx2fnrpOmt2u01IUBdKtK5x17+wcT58+xfn5uYs/9vvoJIlrc60DRGHoMixqKr89kQlWWLa5qppeDPROaeTyPPffO50Ottstjo+PfaiDaYTSWBljvAaBXi89eHrXk8nEsyEcDxrnIs38A5aeJ8M11CZsNhuXuluzGGQzqA+hboRGmBs/jbFSynvfQON9As6gsRW8FJ3S2PPgxsAUYgr8ZHXP2Wy2l0FAHQVfQ70FaXKCmTAMfVXVyWiMqij3SsjHgyGur6+RxDGGg6EPD+Vlo7NgjJ+fTRAlBYYcV+/Z1YtRAke/mLX7v7G3BVRhGOKqTmdmyEABMGWJQClMJhMYY1FWFXq9AaLCgeB460JexpgapOtbm4w0vlzQbmMJ/fPlc5GGr9fr4fT01LNODO1JjQ7ZNR9yqUrsdluUeQ6lRP8Ha/fGUbIRFawviBYEAc7OzvZSgwmYGG5kZhDXm7UWJycnyLIMb7/9tncMHLPlujrLTZ1aE7mRSRZIKSCKYnTCEEEYoxdG2G03COMEcRhgudng4eOX8Y1vvY2Lq2tYC1xeX6OqLE7O3HuMKVw7hEBDh6664reefAuz2QwPHzzeCxkSZLvnUMBUFYKw7pdkXAimLAskcSJEuCWsdRomawFYheViiSBwHYOTTgfdooeiyDAcjnBzfY2yqKBUgCovod5nF01gPxRXliWurq68E9Ke94EOgGA/xNMOFfBnPzfEwfdxT/ThsHpeBsoV2rPWwsCFFLXSNVhQUMY6XQqo8zOuNHY9/0KtfTiPa1KrhgXhXJDX2AYn1jZiT/m69vriVxjudzWmho66MJ7TAaNm3Nzf99ez1I7J/V2GHdpjKf8vxaHy9W0GiL+XrCfQ7NncLyQ4cfMZcECGjGcDOrR2YUFeb2VKv0fw8ziObdakPU/uOj64QFRpxLErmEUDDOvqAHSSDj7ykddhqwpZusHp6Yk31EwnXCwWfvDZ52G9dAWhWJjGN1jKMhQ19b/b7dDv9fHs2TOcnZ3510hjS8EM48jsEQG4RcXeFhSBMstgOp0iz3Nst1tvnJh9wVAAS6fT8F5fX+8NNF83m832AAMN/WI+R6ibcACFqdZaz2qs6xTDXq+H69kNklojID1djgUnII0uwQjQGFMa+36/74EP9R2r1cp3R6UB5/lIHfPalFK+2Bb1AZzQNPCS0mQohhOXY0cDttls/GZGwSPZGS5aPlt65tY2DYbIWADwoIufLxG8rLnBZ+RV1TVDQ4ZJ1957sRdyaAwNGQTS/UWe151Fu649u7WI48TpLSqzZ8CtdfVM+BzlRsl74xx1i7ipDSA9ORoOAiUKVqk9ouiX5+LGx+fnWI8cGi68IylkbvKy5kmSJIg6iRebvf766xjX4T324GHoU26sBOPdbtcX4LI1qwfAMzEwZu/eaDD5HKWhcMazQhxG0EGIoqx8FeH+YID1comsLGEssNqmyAqL7fXS7T2VQaffQ2U1Qh0iigHoENCuTL1SCiiB5XLpsqlKWwODwLMufu5UFRQCYQiwtyk3860xJmmaIs8KBEGITqcHWFf4sNvtoSqds7Oqa73QSDb77XsoOGzjuOx2O8xmMzx+/HhvPomT3Zp37Tkof759T816bxtOpeD6Ayl354FyaeheRqvUfsaIdcCEw6T59/r1LqU08P07WINCsm5toC7vU7IX7fuSTEEYhjAiFCqZNbn2OR5i2G+NVZt5kXuiHCv5s5zfSgEu2+nFQlv5LF4kYpV/U0qhMkxvd4OsrJjDmiE2pz9zab/7IPSu7+/3+GABQjRrwVpXiAqAF431ej10uz0oAK+8/DKUUohrAemwrhvB0IQ1Fp2k40tuTyYTP7nLesNkO3AO6vX1NR49eoTdbofhcIg0TT04oU6CLIUxxjMMNM7b7RZHR0dYr9cYj8dYLpc+s4PGjbQvPXp6/EdHR35yF0WB4XDodQcEUjRGpDIptNPaFXliqKbb7WI+n3sw1Ov1cH197Xt1kAHguaWolIuM18uQh0SxSjnxXJ7nPrtkPB5jNpthPB7j+vraZ/n0ej2fkSINNg0JG5bRGPDZ856lWMkVh3Hly4fDoS8PTs9XeiLsILtaLl2J+lrUy7ANdRoER51OxwuSWaKd6DoMQz8OzDySYkQ3b5ssFblY2ERutVp5z4Z1RBj+Wa/XWK1WXuyY5zkKEZ5KdzuUdfjKhVMif08EPNzQeC1t+pXjKwGG9CDam1mWZbi6usLz589vXR+vn+/nPex2W1hT1V4Maxk0dQs4bwkGhsMhxuOxryLKNbhYLPwak9U+rbW+oBjXENcxQXHjJbqukvK+fdGs+uc2MLOmQhhqhKHrIbTbuXs1FrBQqIxFGHeQ5RWS7gBhp4/uYIKj0zNMp8cANNIsR5bnyOvUX6UAFbiN9erqql4DjbCW4+fmFcN3tz1MHdw2zAB8KM7vfZ0ESdxFGERIkg76vQEGgyEAoChKvN923TykAc3zHLPZzI+nNJRyHNtf7fNIQ0fAzedMJu0WYDbW93Pi31hoT2tRj0I14RStlE8zVWIeKvHaIAhcD6kD1yevmwfP8V5jJe9Lri0C
jrZW6rZhPRxCOAQmZJZS+7X8e/OZh4HGoc869P+2uHr/2i2cgtfu/Wysq+LLLwsDpffT0LmXSgDzQUIowIdsMU9dAAVmSTdBuksxHA5dmmMUOm1DZRAmCRa7hcsUqQwGNILdLgKtkUQxYCy0BZIwws3NjW+h/srjl5Butoh0AKtczYY8zRraraZqpUfODZMhFF/cxto94SdjylIPItNbt9utb6LW7XY9oLm5ucFgMNgTlBLg9Pt9T0OzRTn/NhwOYYqmZgMzT2h0j46OPLMzn8/R7fc8K0KQwfFnvw4AHvjMZjOcnp7i+vra1wth3QvAbaTT6RRPnjzx93d2dobr62vPdJAqZ5qn9LjJ4pDpsNZiVNffAOBBGcERQz/WWi9EJdtD5sdai363h10NOPNdCm2BThRjkzv245XHL2E+nwNwdRGq2sDJUAlZkranxrCCBH0M8wCuMBxLvxtrbr1fsircwGm4y7xwdUyMQV4WGI8n6PciqDBCxQVe9+mx1sJUJXRNzXIsGR5oeyeMr8vwAYEg570xBqvVCsvl0jM0EsBwTRAEFvV8CSKGZyrPbFi4vj6ogc1g4ITbURSBTQoJBiWlys/htREoksUgAybLnAPw8wS1gWqDLq5fAq36IXgRpoKrMKxNAKs0tmmGIIpRFRkMgDTLMRqN0QljJ1rU3NTdcy5St2mWpkCEEEHggMzbb78NAOj2eojCpE4XLD2Idr1SlNNWKOXpaGcQD2+lxlRI08wxIkq7qqRxjKSTAKnLGhuPx7i+vMS23sMq8/4V/jKUrZTrjMu9CdgHtlqpA9LDfSGmnP/yi79T6nZ6qrWuZFekm6w/ee5DP8trO3Q97b/LcHn7mm6d/73YIHF/xrhaMxLcAo3TJjUOZu+eb4egpBbiIOPSOm6Pk219v1t8etfYydfugysFpcM98CRf2w6FcF8FcPD1nAfvybzJ+33frxQ3w02wrC+wKp3Abzgc+lg901HjOPbFryaTiaew6YkOBgPktaK2LEuMx2MfL2P7ZBp9l6K68wacha1YE4ObG8MJDKswA6PT6WA2m93K0GC6JvtcrNfrPa0CK3hKISYBCsVw1Ih0Oh0sFgv/uRwDAD6LhewJY9y73c6PGz1qa+ELk9E7DoIAo9EI19fXPvxBHQezVtg75d69e7i8vPTefJIkePr0KcbjMQD4+5AsBbMNBoOB9+x4z4PBAL26NC5rdVAcS6MuNwLWTgHg6XSOGcfVjXflARV7tTDkQ20HmQo+R4ZRKIqkQeVCYDo0x5KK7XY8lQYtSRL0BwN0a+HlaDTy9L8UK1I/4Tz3pgqfqQyyeu7QE4Ngilyjo9seV5sGlYtY1jLgayR9Le+B9TcaxsA9E4qKWaVTBUFdkCfc2+jqi/HnpZfMzsxlWWI0GvkCdfJa+TMFt9zkyCwFQeBZOHpxhzY8uTFybPYo4Rr8ZanrduvzFpRCWVYIoxhKB4jiBHGnAwOFLC+xy3Ks1ltsdyms0k7bEScedPlrAfDuu++6NR02TA3QNEzkfGmyUfaZjf17ITtlsFyuwGJ1URQjjmJ0O13EsSvMNB6N0ev1veD0gxxSWxHW2rj5fO7HWrIQriKnPTju0tOXhkY6HG0vX55bq/2MKPkMDwEDHm2GRB7SO5ef9yJmA8CdzIZcT3Qc8tw1C+UY8lokK+CvT37WAcbjEJiSWo5Dr5Vj5Tqs3v7bofuQYy8ZVPmaQ78/dI67vg5da3u8Pwjg+FAC0SAMANtkIbBmRVQbhM1mDQ13EXmWY7lcYjQa+cYv49EIqzrmO5vN0Ikc8EjTFA8ePMDFxYX38EejkW8a9u6772IymSAtnI7DVBX6vb5nHeYz9/pduvNggcW2hsMhrq6ucHx8jNVqhdPTU1xeXuL8/Nx792maugZf9T11O84IBtp5dVEYwcJ6RqPb6WK5WnpQwI3/7OzM08az2Qyj0QjWWgyHQ+zSHYIgxHq9wmQ88ZVIKZgsygLHx8cufosmZEFRLouecfNerVZelBrHsWeG2Ejs5ubGgyqCm6IosFqtMJ1OHbKvPU5qPDabjX89DRpDClz4XESduk6GYxwCRIgBCwxHI+hAY7vdQdeAgOxTEARYbdbodnvY7HaYjlyvl+vra5ycnuLm5gZxHCGOYpRVheOTE2SpK9s7Ho+x2+08TW+t3StDTiMtN0dS86T7j46O/P30+31fjc8o1+mWKd15lmG723l9QlW54l+h1q4jL7DXCrrb7SIIAxi43jJRGACIYWFgdikAi6qqK7TaxpsB4NMGXeqj9SWBJbigMI9CaF9fIwpRmRJ5nsFYi81mjV1drbWu1AUt6qF0Oh3kWYDKWBTIAevCCEGg0e31kXS63vBstzv0+wO8/vpH8Ed/9GVoHeD4+BgXF8/hqqCGdVgmgDHlXgiF40+Awc3Ra0+sEwlq6KbXTRD4+QLB1FhrURkLqApJlCAOI1hbYjIaot8fwBqL66sLVGWB3XaLsjQIgwhBqBFGYcPI5tZ1b67ncVkUMMZV+pxdXWK1mGM6OYI1utELWCtCPK5UNZigqehxB4C6XVGxLCvMlwtUJRBFHSi4asFJpwdjKpSBRm84QH/Yx/XNFZrSiPuHFD2injUKrh5NWdRi1SDAbrvD9fU1Xnr8GBAsl+JZbAVr3byjWkcpd56A4lHLT1T+0xSUF3e6/7v3QTv5p0aTLcF9S9Lu0tuW31+kaZBhAQnKJVA9GHJwb/asnfTGZYgBgBeH8hwyfEKg4a/fCKPaMrBtcCaBkvz8Q/fJe5LMxgc9DjEPcu0A0rHh390s8p9oqdu4zZq4ObT/uxeByEPHBwYbWmtEnQ7SelMJtUaHIZE4Qb7bQVmDs/NTLOZzF7qoDPqdLq6vrzGZTJDtUqdMruouk7HyRpdAgywAkbtE29q6r6LOKCnzHJF2dRJWi4UrwrXe7FWZlEivLEssFgsA8HUd6C2nmy3K2hB0ul30a5bGVhVC7TJeFouFqyVSlT70wIczHo9xeXnpdRAsLBXHMbZZCh0EKE2FTq+H2dKdJy1y6ChEWuTo1ECmqipEtWEG4BkYGhqGA2Tdj81m48M0DP1I75yAjoJSskI0BlmWeVaIRZiUct128yyDimIYNOAjjCPkZd0rItCoaoNQVQZVmiNKYhirnFdZT/g4iBBGCaLENeJL6pAbAAwnY2RFjuG4rsJauntdb91cOz0/3ysKB2AvvbUdcmBWDBcDf+YcYLlzvn6z3aBU2vXpSRJ0kwQnx8e4urp2PVTKElnuajukxQ7DnmOAeoM+4iSBNa4XSCeK0UliZHmKcpXVv1eoqhJVYaF14Dd1SQ+rmmg0tZfD0B7DJsyeIXAC2AlXATDI8hTbzQZZ6opHwbiy4S61z6DXH2A4HAEW2KU5irICVOBtS6c7QNLpIYxi38eo2+2h2+nDGoVup4cirxDoCNbA9cFQIbYbd11l1TRoYoiVxsJXKRWeblmnzBsYaAuoQCMKnSaqgnUx5NK91pWgr8FlDWDH/R5UWWAznyHpJOhEEbZ5jkGv78SkkRurXrfnqnfWBjLpJDA2dlq
brEQQAL1BjDRL8cd/9Pu4d3aGMOwgDBLYuhiVa9Dm0lytdXqToioRhq6AVdJJoLRCaahNqTVJqxWqdYYkGUBrwNgSOqwQd3ooqhxWWXT6fZzdv49vvfMERjWiyb1D3TYAFo5VphkI6jDyxdNnMN9hGmaUhlBZhEo1IcM6HBXoGuyxz4typcetZvEp09hWb7Bs/dIma6Rdap7PXQIOfpfGsO2lt5mvdvikDTDahtVU+1V1bwldVVOOvH1O7o0SFDdgpAFTQRA457PFDslztQ3/XayMfI/LCLl973exB+2xlWMl9V/u/XUdH/H5/nOV8vCTTB8ZPMo8rHWh7PqPiIJ9Xdx7HR8qjIKa0oS19YTOfRnroM64WC1X3qNn4SvS4BwMWc+BC4NUOwtQsT8KDTeLWPH3rImx2+1wcnLi6bGiKJw2oTbAFIfSoLLXCutv9Ho9lCyHLDxibvA0UAyprNdrHxel50PqmFoJVh4lYOLiYwhCtouXdCX1JAC8x65roMMwAIEEY4oEKKS9mVXDmLmciAzZsO4GnwPBCD1qXu96tXIhCaEd0XVqcFVVrheCta6pT1GgrCrkpWNIdunONWPSCnGSoKxKbNMdOt0uwjq7ZzgcenaBm9N4PPbhN4bVnj9/7vUr3JQoWpM547L0NfUassqmDKfsid+iGL2610qWZljMF7i5vkFVluh1u5hMJjiaHiEIAtw7v4cwDHFycuJZDepD8jxHFAbo9zpQCoiTCACLQe2r1bmhyS/5rNpCtvbvg8B57rvdDovFHFm6A0RWg7WufPfxyTEGgz7iOIIxtVDWuBohgCsd3u/36zBmDECh1+1hMHCdfZ89e+ZDNcxeYt8ZNtsritK9rxYl8xoZ0vEMRb0WOJcObcLt0IrSypdcZrouLLupVsjT1G2AdUi0qVQL7HZbmDqVjxlQad2sLgyY+lhBK+Cbf/YWsjx1m37N3jHrjimCvFJesgzNOSPVXPsu3dVdgEMEQYgwiF2mSxij0+kh6XbRHwwxPTrCYDhylWw/yIas4Oc0jQsFvNy3/FzRTQaG+2KPC13H9A8bxdq6uPtV/L/72bL6pGr0HNx/KAh+L5pezovqDqBw6JCvu8u7lkBGAhI5vw6d99Df9n9393vl58r1yvHxOioRvpEg4dC1HNon2vdy6LWH7lfaAXlt8jvQnOvQWLRDOO/n+BDlyt3hGhE1D5BhDWstrAEKU3gPmKEWajmYPcDY/G63c2GWekHToFOkCTgPlhVBmcFha9BDMdt8PsdkMsHNzQ1OTk7qOhkunMLz8iEQuAwGA8xmM3cdaYpIB3tpoLwHCTCMcc2fWIeDMe3VauUNIDcAsh4ET8YYn/1SVRX6/b7P7eYklNkHbOxGyl+mkgJN6IPCy/V6vYc0SZ1rrX2qJLUpNLrX19debzOZTLxOhkaBz7DT6WCXO+CiVUPzF3kOq5p4aFlWe5sf36u19nF9gsaiKDzDcnNz44S0xmA2m2E4HPrCW9S6GGP2xkNr7bNqgCajou0BtQWN1lr/+eyVYm2jNdhsNl40C8DrQwDg4cOHSKIYo+HQMXFDN3cePnyId5899RqUZV0wzFXRrdePxa3raKjrejPQjSHmODJ0xoNAsKws1ps1irwuNleHMnu9Xl1Qyvp+RM7IpthsdshrAMoL45qU+iCuA2ZDKeXqt/A7061dxlFYd3l1c0jOezJMLPd+yBOSlLV0QmQGlLYNgyhDe2xNwHlOr9plom09oJZzsr2Bkhn76p/8idNAjU9RGYUyK2/pYRgl4fcoilz4x4sjm3o1aZpBxS48FAQuFGFMBGMiWJtAa6DbTdDtJHjw8BGWywVcM7b3d1hjENbMHp8bdRsnJyd7jk5VAS7ZRXmgZC2rfGo/F6wo8FQTki0vXNchCmHsawB7KHRwl7Hi+YDbaZq3Ac8d97/nue+zHIeAxl2HfP9d4YG9/4sf2/cif3+ImeHclu9rDHydISXuXZYel0c7W4SHZDtexAS1GRj+bIwBrIK1+89EfhaBhjzvex0fXJFk4VOcgiBwBZmCCHEYIY4idDpOFT+dTnB5eenFVTQiNKCLxcIPvCxnzU1DLp49j1GEDYxxqZ/X19d+AAgMmvAJ9gwUGQpZj8CHHGoxGw3sbrfzjAqLaXGDc+cKffM11o7gQ2Tp9NPTU097b7db9Ho9X7zr5ubGT0hm1XQ6HS+opOiuKIq9TrWcWLwnVrmkzoKhAda2MMb46qPUv3ATYj0SluMmAGPNEVZpleCJz43jYCGFVZXXF5B14HOR2R1U+HeSDgD4aq4EJyxFf3x8fGvRkJkBGvEp5wo/l96b3HzobdGwEiQD2PM0gsDVQTk/P8ejR49wcnKC0Wjkwy403lXlGv+laYr79+9jtVq5zsS1kWUqdK/fc5szd27A/3zIy6J3KFNxKVDkRsWvqnTpalEYols3uxuNRjg/P8dwNNqrDssNb71awdSMFOBKfY/HYwwGgz1vh+Jafi7HkYBOMi5pmmGxWPp1TFZws9n4nkgsa97tdg9u7gSjrBzri0UJoZsEG3IdS6PCL85PMk8ypCgr/hJQJUmCxXyO58+fuw2+bFhSea18UvzeZLwBsuEVUFcOVS71XfFe6oJkSdJFt9vHoD/C8ck57t1/hDC8u8X8oYMZEtLTVMrpn6TewVqGF1CHP+qwnSXg0A5EQAFW1aEZXWuJtH+9gkZQa3S0CprzoKn5I8WWt4TIHDvhNUugcQgIvggktD12fg/2wN/+a+9iN6TRvB1CuR3C4u8PgY1DBv0QYyNZdBlyej9gS/79PZnBO/4u99VD5+IhxcP84vje9Yzbx4fq+mrKynlNVYUSBp04cTPWOCCRRC6LgAZTKbXXeZMbMQecAIJZJ8zwYPYKvWFOAIIDbp5BTen3+31cXV3h6OjIhwNWmzV6/b5nS1g5lGzAYrHwOoVutwsY66l6WSOCIs7pdOoFg6WpMByNcHl5ifF47LUSQJOSyhRSXieNHdkVevUEOczg4CbHolwMF/E7F7X0kl1jqrnfWAk+OHmoJVksFn5BMrvl/v37ePLkCR4+fIjdbudDX5PJBKjqgktVibgWjQLwwMnU9+YKveVIko5/Vrznqqo8G0WQqJTCar3C0XTqQZW1LlWW183iYwRnUrdC5oHGlIsCgAcNpHKlkZRzgXOTYldr7V6tDln1lJlFFxcXGPT7flwnk6nPhlrM5yjrLKvVZuPDcDzkAt/bQ9uvMU1KWrusuARMccIwkvJjmGVZnS68n2WwWq2wWCxRlaX3YqEUBoOBH2PJDADwwJNOQVVVWCwWePjwob8+hgI7HRcKnc/n3klgJhTDKUwl5XURyBBg8rm0n51SCjD7oRc6GATO3F/K0vUhKcsm+4tjwdfynEEQoDIsze8qWD6/uKg1JpHPDGuu1QAgte8EdmHgaglZo5BlBcpCeIQUWipnuKEBVRvrSmnoMEachBj0u3j86GUMhyMs55cfYD9uUhD5PMIwxMXFBdI03dMk6VY4jmuRAEzS8NIj59gBDThrvw6AF922je8hsHAIJPD3d62XQ0
f7b9LIy884BGLktbXfI5nfgx78C84lj1uMAfYbsR0aK3kevobgWB53MTZtZucQ2JDf22Chua5mjRFg8D7kOd8vs/EhKog2pcGtMYjjDrRSGA2d1x+Hjt3YbVe+Yujx8bFPVWXH0vF4vFcxlBsCvXcCDmutT3ckHaqU8kKeNE1xcnKC+XzuAQ4nyW6326Nk6VFz8ux2O9eHpC67vd3t0O909wadhbsAeM97OBy6kuv9pq0849j8HFK6QFMzgKEhetSs10GBZBAEvneLZG5YnIsbN0MrMv5qjPF1RZgCx8nBMBLHn55lmqa4urrCZDLB22+/jZOTE1xcXKDf78Na65vfKeMmWKfTQVk4QWgURdhlropsUZZIanBA9MvNX+or5BwiMmb/FupFer2ezzghUGW8P4oiDIeuABJDUVLbwt4yfL70jskQ8LWybD0/R5Z85zVHUeTZLC5sphmvFktMak2JsU6I6bKyRoBquvoOBgPczG4ci1CW3iWSG42Pm4KNobC38ZGVkbRpVbkqq4P+AEfHR6iqws8jXivDlwwD7HY7V4iP11AzhqPR6FYBOclOcZMkKN9sNvja176G4+Njn9EUBK7WRJalHnxIZT/Pyeci5wHHiqwG54ZSyjdM1LWx5kYtY80clyAIxPpPYW1DNcvYuPS8ZdZYFG1hjcbz58+dVigO6wwkGTJ2YAPKQNXFkaI4qvfFwIVWrMjk0AGUCmqBp65DDxpQAbQOEYUBBsMBzk6OsEszPHr4Mr48u4TW+3UMDhkUwIXcOJdkyG02m/kUfe4JQRAgSqjHakJ6UnPS9oqdWNHviPX+RvGg8t8VgErZPW1r22jijr/Je3q/rzv0M/9/yLC2z38XWyJfI8+jtYa2ssHe7a6wfN17AYj2ZzRAroILo9y+/rtAFH/3QcJQbeDTBj3u3A3YlF9ce4eu5b2ODx5GqeNJVVl6NW6300G35zzM0+MTFFmOs7MzZFmGk5OTPe+GN8jMCRYIY22E8XjsvTYCA4o6WQeCKa00Pkz7pMaDLbiNMR7MnJycYLPZ+Aqi3FgZgy6KApPx2G+cQJMBIgu8MEbswMx2r4AVtRz0mGnwmbpKNoZgiVUYqd2gUeGmScEkrw+AZwhoHJhdIs8jQRdDQUEQ+FonNEoEgBTPpmnq00LJslBLkyQJojhGUZZY1YJRoC5pXZc2Z9iFDAXvk2EwsghSo8LfW+t6aJCR4d+ttb7+CMEBwQcFx0mSeJGvBCAS1NBYA40Il94LmRdJqfNc7dAeS9s7A990tyUg2aW7ZnFpNrrrIhGdcxtvrSkAJLOt5I7N93Bs5XwoigJZHWahZ8oNgs+DG9FqtcJmvUYd8wLq1/XrmipyvCSTKJkZhkOm0ymOjo586I6b13wx98BxV6cMr9drD7C5JoFG1CifEecoQTYBNcfrkFHkPdIRkCwJw2J0BAiCyYTe3Nz48SQgTZIE3/jGN/weYa1LlZWghnu4Uo3Q3T1X91WWosy84Z6pYZWXWkLpAEEQQQcRkqSHXn+I6fQUj156ybO+8j5fJNTjwXsjO8d9sVuLnh1TGyMMI4Rh7Gp+1HVH+HOSdNDpdP1Xt9tHt9tDknQQhnH9FYF9XrR2qc9KCGfbhrVt+A4ZsUNe+l2e913nlb/nOj80Ti86pBFv1w2RxyGxpgwptP/Gc0tDz7nfFn/L65fvl0a+fc98fZuJkPuKvL67WCX+vn0u7gdtxuuDjO2HqyCapqjqJi5hEGA6mSDdbNHvurLbQajqWgmxZwdcQa7UGyZqBLhZSBGc1to35Vqv194ghqHrn8Ib5+Sk58IeERSiRVGEq6trHB0f4fr62jdRYxaH9KqkEA3AHthha3u+jxtBr9PBzezGn5ffZUdaFggjuOH7Gcbhd4YIyMaw7DdDJpykZDp47dyU1+u1r0p6fn7um5fJyTOsBY0sAHZ8fOwpbW5MrPdBHUiWZejUhbyCIPDluHu9HnZZ6s9POpyUrAQLBI+SdqMRr6oKYf03giDJYDE8IUMl1NBwQ2A/HIpjGZZiNpPWGqPRyIMbzpfZbOY9eF4XDZZcsMvl0hc7W6/XmE6nWC4WUNZd62AwxGbrAGhcg2MpjAyjCFle1L0i6nlmDAxcFgjnna2zGJhO6H9v97M4eJ0O8FkkSQRrux4AtjdCVvB0wi8ACtBh6Lsg83lJbQRBGMeF5yUTwBL+nCdpmmK72yCOI0R1dgzn/ItSIrlxUZxK408QM51OcXl56T14jgHQ9LrhPDLG1MDGbeZR1BSo4nyTYnXqqKqqQtJzoPno+BhvvfWWq0/THeztfZzblWEBJscQOL0Dr8GiKERZZ+VCKFYBphZIWADQrvhbEjkgGyc9HB+f4eWXXsV0MsXl5YWf99LzfL+HMQbPnz/Hxz/+8QboWetqmrTob6Vo7N3PvC+l9vUOnE8EhDJ7xJgSVgfeM5cGtq1DeJGOo+013/U6+UwO/b09ZtK48m/vBd7awMeY934G78WcyOuWmZkNGD9cAKx9vfIZtsf1EKA4WMivdb3SGZKfKcWp0pn5oMcHZzbglMdVUaDMckRhiM16gyTp+EGLwsh756z8SSqXGwQ7QfI9pKvlA6BmgoaIGpDJZLLXv4JGgGI4bp5OoOcMCQ0NafPBYOD1H6xcuqkBDgtbcdOlYE0CGyfcS30ohKWvuQDZnEoKECVQ4f3Se5YeGB+61LIADYvAzZhePA3ydrvFaDTC1dWV9xK5kTPrhVk07AnDDZ56CnqI0+nUe0MER1mWQQdOy2LR6Eo4MR3L0GhxJFJmRUqyBvTOOd78LAJSemh8XSPCs/41DJUxnDYajaC19toCXjs/kz8TtMRx7EMILFgmFx/1BQRazKTa1WmT3HRl+KEoiz2dhTEGZVXeXpyC/uScocEke8jr8O8VHi6vb5emWK83Hpy2QQM3C58uCkd193q9vYJmBHfy2tuiTD5D2TyPoIBMABvEEWzIrBGOF69JCnmZfiznG+CygBgOa3to3PgI4jk3VA2ICeQIHGX4RYJBzjetA5S1GHsvJV2MqRxX9xiblFHrmrTUNTnc3wMdeqNtARh2QIUDKEqHsHAhleFwgvPz+xjVDKu8V7Kt7/eoqgrvvvvu3rwui3JvnkmBpvuS9PyhNOsmTVwKsf1r9G0hb3veH2Iz2oZT/v6QUZW/a59DrpdDrz/0XR6HGBMJ+O86XvT5h14rGQPuczJ8KOe2ZDva19keN8letD9LPstD4Z69c+t9XY/cow4J1d/P8aGyUarSNQyioQ6jEICj7ga9vg9zXF1d7RlOfu90Ori+vsZgMMDf+tt/23vQvDl6TbvdDtPp1BtBWSacnupoNMJ8Pkev18PV1RUAeKNQliVGdQMyViRl9oAxxns2Pvsid2mYBDE0TFxgkk3hxsnf0TNjyihfQ6DA1Duq8qk5YOnu4XDox8Za69Mn+RqGEmTqMDdQZn1QsyC1ITQe1CzQsLdDUhTizmYzBEHgDSoAVMbVViCNXhQFlnU2EeDYLYC0ZWPguIj4s/Re+TeGAJhVQ0qai4IGjbRwGIY+js/51O12M
R6PPeij0SUII/tBYMGQHoEMr6dTZ3MQxBFsMETDa12v1x5Qu3tw84mlj7n4IwJnIzeq23n3xpi9QkSs1Hdr49JNsZ4mBFT4UAXDV/LYo2K1hg5DdHs9n13T3kA4t3j/VbVfGIhhFq5DyVIEOvAhFDKGcpNse+myci11RHLTZDqzKzDXhOrkNXFjJXAIgsALpI1pGvNRE3R1deXXFZ8172s0GnnQdn197dlbqY1yz4WFrpRvwAa4OH5ZGux2WePNE4hAwRoFY60Lo9TVV5WOoIMYcdxFp9vDaDT2QL/NUH2QQymF+Xzuu167a7R3Gov2PDiUGSEBlwSJbcr+RUZWnq99SI/6LsAijeet8CPuBgbtsMF7AY5DgOe9Dgnc3svz59pvAxMCDAnEpfZJ3tuh//N7G3xIgHwI6Mn7ln+X5wX295P3AmDt44MX9QINbYUgdJ7mo4cPkW53iMPQ08nz+RxlWaLf73sqm/qBPM8RRhH++//T38EXvvhF/I2/+Tfq8EGEsmRGgmNC2H+D3iM9JNZWkBsb2QTp8d5c36Db7eLm5gZBEGCxWOwZLLIExhhPXVOoRmEljY4UYDnw1FBNsnaFDMEsFgtvNLmxSe+RGzxBCEMC87r6KrNV5vM5+v3+XuhHKbW3YdKgUrtSVU2xL2utByAEHmEYetEo7+P+/fsAgOPjY29M2anRl5TWjehTa42gFTbhQiKrQi9cxpPlgiIAUMoVhCK7Escxzs7OcHR0hOPj472qrHJxkNqlF03GRIpUeX80lvRa2RBOAhBusgSPZOOm0ynSNPVzOssyzGYzWAtf4l0pvefVh6HrfNl4gPsdGXkfSrty0VEc7fXZkPfpwUlZuq/KtbOvRH2DqnJ1Toqi3Nt0osjF7vuDAXr1POJmI+/XG06z39OB16yU8gBvOp3uNSuMa1aLjgXneGOomzLtDsw0Rq8p/AUACt1uFycnJwCA0WjsjEqgXeaItfV4BYiifSaUzCNZPWpZCGzG47Ff89SUjEajuihhUAN4i4cPH/o6EpL94Ni452LrGhVkNgzy3O1NhtSzVXUNGosmYbIegyBs7iOMoWshKUWdvObDtPV7b/KbzQZXV1cwRmY0HRaasmqlfO7cp9rzgGCD+yQZV1cc7LYX3va2bwOB24DKneLAmGnX9bddHbr5zP2UzrsAi/wufy+/JBtw6L7a18fP4Vi+6OCcpCBdPmOllF8z8lrbe0Ib3Mh1ynGX7Mmh8Iwc17uYj0OvaYO+93N8qDAKUW9RFnCq6Qr9QRdWGVSmwHQyhjXA2ek9aB1gMBhivlzBKoXKGpS2xF/5a38VP/YTP4403eLHfuJH8dE3XkPcCZFXGXRgsdutEQZAVeYwVYntZo3RoI+qKp1QsaqQdDq4urnGeDrBertBt9fDNt2h2+8hK3IEUeiErEGIUAeAsQh1AGsMukkHeZphNByiLAp0Ox3EUQQLIOl0XLuIIEBWFKisRWkqVzzHGOgwQBBF6PV73lCxYh8fBNugM/UMANI0RRiGvpYF/0/PnSEhYww6nS6qyiCOE+R5icFgiO12h+FwhDCMUFUG2+2uTvGrPDOxYQfVWs/BhmLX19cA4PvIbDZbxHEHaZqh2+3B1KWYr69n6HZ7uLh4Dq1d1UMdhtimGYqqQhCEyIsSq/XGxafLCkVlEIQRoiSBhQslKK1gFVBWJaAVVKBrr8711mEjrKC+fxoLGh0APm2YzAzDVGxyJ9OoqVFRSvnib+1x52KkRx7HMU5PT/fEtGzRDsAzR1ykLNq2XC5RmQqrzRpxJ0FeNJlI2+0W1lgUeY4iy1EVJWAaISQ3I6Xchqo1N1YDl+EgSkNj31uS4ATeE3L1HbbbFEVRp2Za5zlXpYU1CnHcQa/XRxR1oNXtdvWcp+Px2LNfvF7Wm9lsNtjtdnsAjqE1aoM2621dkVQjDGNoHSLPS5jKQkEjjjuIowROWKlgDZDnBdbrLZbLDbLUzQNrgDjuwFoFhQBVaRBFMZJuB0EcAQGgowBhEkGHAeKO66JaVgW0BtJ0h7JsUpYlUOBaA1yIptnPFNI0R5YVeHD/AR48eOBKgAcaURgijlkwrHJ9SLQGrCvTHYWOhciLAlmRY5umTX8UZQBV1V+uH4yqQZcpTR16qcXnRYrddotO3K3P7bJbGj0FgLqcuK1rSNfmot7KZQluAwuDd5++A2MqFGUOHSjAVrVqtQJsBWtKWFO639V/UzBAzd74tuO2qYCrtaq/XHntMNSul1Hc1NaRdVJoNKUBc//n/FdiPSi4brqc6w7UNYbd+O+8Hr5XKZ63Yc5k5pEHhS2Dexe70QYr8n22HhN+vpf+Kr7m7gqfh4BQGzQcYkjuuhbe3yFBa/szJRiR9ylBhnOCDarKtZ+w7H5Tp2wrHUAHofvS7uv9HB8ijGJR2ar2vgx6/Q6gDfIyxWa7QpJETu+AANYq7HYZ8qLEcDTC8ckxVKDxvZ/+Pvz0F34as8UNrDZIujF+4i/9ONa7FY5PjmBsheFogLzIMRj0sN2scHp8jO1mg8GgD2MtOt0ONrstJtMpKmMQJwnyskC318PF8+cYjceY12mKu+0WnSTBZr1GEsfQUOh2Ouh1u9iuN0iiGEWW10p9hW26gw5c/n0UR8jyDEEYIs0zxJ0EaZZhOBr6UttkH+RDlUWUmEFCo0aNSb/f3yutba3F1dWVSO+tMJ8vak+tRBhGWC5X0DoAoBAEYe3BAuPxxKcx0lgA8KGu4+NjJEmCs7MzbLc7TKdHuLi4QKfTxWKxxHg89t/feuvPcHp6hvl8DmMsFssVev0+ttsdgtAxOsPhCNABDBQqY6C0RhhGjv3QGmHt3Ra1dxaGIYw1WG/WWNNwCbEZQQXvnUaMAj7W3uC5mK7IbKR79+75ceZzIdgaDAY4Pj72HrTMsCFgkWEyKWhlWIAp2fTiszzHaDxGJYSHQRCg13WZJ0mcYDgYIKjPR4DeBhvNJu5KZhtTOrGd9ESs9TUMgiBwTdXCEFEdztA6QJ4VKIsKZQ04XD0IN0eCOmMgCl3XUbIZZKL4WfIapVdLzYUEO+1Ouk2hrwi9bh+T8RSj4RhxlCCKkrqAlSuRn2cldtsU2+0OWequ21QGeV5ivdpiuVxht02xXm1qcFgiCEPoMICONKwGlLZQAWBQoTIldKiw3W2xq782m7XX9vA5yzRyMoSnp6f1vChQVW4clqsNvvrVP/FzgKyUT010IwtYp8kg2ChN5frT7Da+Tbx7fmZPL+M2+HrrtUBVlcjzFJvNGpvNGkmc4Pjo2IXqdFPjwQMOZeAbVqh9ut9NF/d3YypcXDyry68DAMFDAWsqVFWJqipgTP29KmFM8x1o5imNqgMD/J0DY20dh8wkaoc6JAPBeyLoCAK1ByyUsmjspvFrxdTX7tZKBWur+lolGNmn/Nug4lAYQK659t+UvGkASjfgKAhUDZqa63bXWor7vB2SkYxGA65uZ6K0gQVf02YjJBvSvle+r81wyNfKzyPb6vo46bpCbp11VBeAs/YQt3P38SHCKFQNO1SvlUZP0P+77dZpK7quvLcC
kNRx+PVmg26vi7/yV/4KBoMhsixFmqUoygLf/alP4ft/4DOYzWbo9tz5otilG/b6fdfoyzRdZkmB0/viIJL6pqcsB5f1MBhWoYHr9/sOiQdNmqrUSzDLhR5zFEW+10ue557FaAsNqc2QIQyCEHrSBCbURNy7d6/uM+GM8L1797xQlpMAaASvjLFeXV1ivV77Uu5esFin0lIP8fz5c591cv/+fQRBgNPTU1xdXaHb7eLZs2d49OgRrq+vcXx8jM1mgwcPHnhRL6+LoRet970Fbio+xFJvNDRcbE8POMbBdRttwisEXjIcwg2fIk9WF51Op17HQ8EhDSjp/MFggKqqfJYNBb0y9MT3c6FKQ8zsCFewrEnFnkwmMMZgNBr5+yeY5OcC8M+u2+026d/WVXNk+IMbEhf8nlfDjeaAB0b2QYbm+AyYNeRjv0HoU4W3261nj0iBA/C6qW63u0eXeoFp/VyZ3XIo7q+19vdKT4teVxiEYIM56XXKeh7UTDSfqfY2ZR1on5nixkAq8F04M63Tj5n2yflELQ+ZNGZx0UDyuj75yW/H48ePvEYmCJsCZc0abEIsDWPl1vl6vfbAzG3G9b8t4yWdEbf+N3Uqvsarr74KHQTewB3yvsVMuPUb7nusP0SdlnxWzHzw483/m9v6DfmzrHbpASga6l8aPhk+aV+fDDfcxTDUr/avb+tY+DtplA8BhUN/b7MH7VBEG6xokZ1zSKNyKFzUZPq0e4+8+GizGG39TPsz7wr/tOfDXUBKzgOeQ+py2tcsQ20yW+VFx4cKo1RVTf/VRsDR/0OfIdDr92BsBSiLpNupUyQNdrst/vpf/+t4/aMfwbOLpxiNhjC10CqOInzxZ38WnV7XdTM0LlPEKkAHuvYoXa0KggCmilJMNhqNAABnZ2deK5FlqU/xY4yZAjcKTLn57IRAkb1CKJ6kweGmRMOxWCy88ST9zk2X2gTWKqBXReDC80hv4OnTp95oUsA5n889eJCiTopBXfrlAN1uF51Op2YkjM+KobFbLpd48OBBXXxqiKurK1Cce3p66gS1tUiOvWMo5uWY0wjTiDuNQLlXX0MaGDne7fhhksSIBcXN81C/wtAEFzY1NPLZ85qYGm2ty6ThdT5//twzFcxwIDDmc+fzk8ZVli4HsNdEUIbIJFjlQuVCZtYND79BBIErX621S4E9sOH569G67quxnw0BNGXN2/SwBCF8HhRgkqUhKKJ+hfcsa7YQAFJkyUNqZOR1S6DBQ6ZBS01P2yC1N0tmtlCTxfsMAteCnoZA6mxYqr3X62E6nWI8HvuwG8Gr1FTxefH6h3XTuY985KN4+OiRb9bG8fEVTQEwc0OGlIq88EDOP8OW0fCGay8mbmsGLcVmt4XRwMnpOZJuF0VZorKAVRqm7suqrHJfB9vDOnwq9WgUibaZrLaHKw+5TtuGtw02jDGwaJyLtni0ua4GNLS99f3rv11cTB63APkdhxxn+ZxlKKcNcg6FKvzr9V3jfTd70f79i0HVbdDVHof239pgn+v00DkOgav2udog5JATxOfPNfXfDmzUk0VpDVNVvq9GFEVYLBY4OTnxk28wHCDNUvQHPSxXC3z2Bz+LH/vxvwBjDUajIfKicILTqsBiucS9Bw/wMz/zM9ilOxydHAP1JlLWRnO92SAIXHolDTvFnoBrF0+v/eTkBEEQ+FLfYRji6dOn3vizP4ik7Jm2ScAhvWqgEWrR0FFctlqtfLybVH+apnupvKSQ+VBns5lvJMb3EDRQTGqtRb/fx/HxsWdMGE7YbrceICilsN068S0BEQ3TarVCmqa+Fw0NLfUhDCE8f/58r5prURS+gBUFuZx0jONHUYQojjy4oJBOCpvoubVDFO76XBiIYR+yHlEUYTabeSaBDBANvizDfHx8vKdXYY0QGlKOO8cbcOLXomh66CyXSy9KbYdQZB0Wily5EKXoVTJP0pAEgWvxHokY9iFPSxrjvQXaEm1JsaI8Dz0PmY7MDYjXQuPdzmbixsFz0Sgz1VUyh7xGshv8bJmaTN0NQyy8TglGm+3E+s/nufk5ewBDa+iAqYBNXQ0oNCnKSQdHR0eYTCb+WfP6fZir1/PsJp2UMAzRSRJUlWOGTk9d92iGAmEbnRrQbMr8fMAB2NVqhdVq5UEt54nMLuLvpJdLMLNaLbFarqC0RmfQx8PHj2FgPWUNlj4H6t4lL86SIFi/vLz0mjJ5HXdd03t9Sa+5MUj7BpbzlPsBQYiuqfk2COC13PX1fo10O7QgAbp8vwRGbZaC52uPEeS4mcOZIPL8ZNvar3kvgNQ+2mAGwEGQQRDQzjbiOQ4BRzl28kuyKTza72///b2ODxVGARSiSMbpYh8ecEbRba7L9QpKA7ssxauvvYK/9bf/JvIig7FuIbMMcFlW6PS6gAI+97nP4eVXXkGaZ1hvN3XBQ4VMUOzr9RpnZ2f+/xwIXg8R13a7RbpzdPHV1ZUPewDw2SmbzQb9us+F9GzpDclJLCmmJkbtwjgMq9Dg5bnrSkqansaaYIFGk9SrpFSZSUJDWJblXtVTPmSmpzoPT/sxoFGnEbbWYjweYzQa+U2YIIwbda/Xw3w+x3A49GCHDAjFXrIOAmPeDAHJQlasgUKPWKJgvkZmAEkjTY0Ee3UopTzzwJREhgIIjgD4sBcNIAEjvd/T01M/NmR6yEyQ/WGBK3r4Ydj0Q5HpZzTEgNtUZI0Lpugy88rXjTFNijRZuMFgQOGGa3PuN4b9+CkPCQrkhiBrejBVmJsQ37fdbr3Akxss3yszi6SYTwIXyX7wvPyMLMt8+IBfsqU8X0cm6UXpfHzOBCUE+lUpCsbphtlQECWiVVOMTILbIGhScvmsyOLREOVFgaIoEccJHjx46EJ2tR7C2CbF3VrUz2m/2iIFzNQYyf3S1l1U2x5qY5AUirLAcrnCerOBCtxae+WVVxHHHZ/J03y9eNtWCv7ay7LE1dWVb2XQvOZwPQn5swyhHAqfyGdrRMYK701mrZC5lXoOCVBexGS0r/cQCwMxD+Se3Q4pyHGXIONQOKINPA5dmbQbbWAkQ8rvdX/y2bXZlUPX1D7XIcflvcJL8n1yrO4a+zZA5O/kvHrR8eEEolUJQCFOGu/IWhe/dlUS3WvyMqs3hgr/w//49/Dg0UMUZQ5TK6XLqkRZVSjqhknz+RyT6RR/8Sd/EtuNa6yW5hks3IZ5enaG7W6HwXDo26JT5MVUV8bTWSo8zVK/sbGkuRxQihCVUlivNzVLsPWDz9ewTgb1IlEU7ZVQZ0wUgNcmkLK11oUdkiTxPWAIWAhqWKKbm6VvcmaMF5MyZEKdAsM0rkKpM3abzcazLaenp9hsNv7LGIMnT554METgQIaDpeW73a7vF0MPlmmyRVF4tkApV7ZeipKkZ8ywmlSk02ACdRigKH01R4YtaNw5pgRsDKEB8O8B4MEI9RPsH8MQV1VVODk5wXa7xXA49OdhtVZ+tozlA00dCGOMr7XBhSYLj3GecEyoK+F4ECgTJPZ6Pbz66qs4OzvD/fv3XV8ZrWt6vha4tTYoY5zIkIb
NU/rC4PGLgI+bwGaz8cyW1yeF4d5GMRgMPNvHeU7AwPAUmYq2hoMAhGwTmUI+CxosjgfHivMDkF2DjQ9L7AE71VD7BBgUThNUJEnsGSYyiwRRVVX5++a8Z90WtgrYbNZ4/Pgxzs5O6xLlDfMiN32LpjR4VbkSANRc3QIb/hlRdFcbC2NrUOnAX545sGJgESYxVBji+OwMo6MpdmneMBteEOqyT9oGkSyDRQN0WYaAgIfXxddz3KWXLI2V/JsUEe951+ZwyiwBNrszu/XWhFik0T9kKOU48jkfYvbkPUlwsQeIhPGVAGE/pLV/yNCTNc31BGGT8nnoeuVn89olsGkbeRZRA273G5EAieeUxl9+HvesQ4zQXfcpx5Vzof3c23oOyWD/NwujcKE5j8gZIKZ5DQYDLBcLlFWFvCyQJDGubq7wkz/1F/Ed3/kdWCwXjtEwBuvtGsY6oDEYDLBYLnF2dob5YoHv+Z7vwcuvvOLrQURxDAsnMI2T2GsKuHmyEyo3Ff6+qioMay1JHMc+5ZRUMr06bzRqL5WMBD05eoo0tEopX3OBhpRlv1mpk6EWpZQHJdJLAOB1DLKKJkV6TOVsF+siyOFD54M+Opr6KqpXV1cYDoe4ublBv9/3/WZYNp7n5mbLjrks6MV0UjJHNOa73a5pHFdv/rJomQRebbQrJysnfhgGHkCRgeBEp7iXDMVisWhi4zWLxo69eZ77rryyhsf5+Tk2mw2yLMPFxYUvDMcFIr1e6mfIatBoMVzHOSCL00mGhnU37t+/7/v2UBzMRU+gmSSJfybegIrNqO1C+c1H3fYE2wc3CM7b5XLptULD4dBrKoqi2Gtwx1AZG7dtNhs/T+WmLjc7oAENzPrhWuAa5KYogQU3tzZjJO/Bh0nquRLopqJsEDY1WlhDQrIlMvunHa4gyFBK4ejoCEo5LdZyucRiscDHP/4GgsCxh8beLhFPsCOfQ1VWfl1wzG5tmmiFPuoMjzB0DO9iuaivS8MYCxUE6PZ7uH//gbsGTbFozWx4zca+pyl/5vixD0xV7Wc5yefZ9r6ttTXmPeyVHzJGXJvtL1kun/Pc7ZuNvkNqPdohDTnfPfBuXYNkWzhPDl3rode2QxLt/1NLlmXpretphyQOXa88JDNxa5rY/YJ9bUaifb52LQ7+rn2P7bBHm6ngedu6KglCuWbbYEU+j/c6PjDYUPUNxXGEXrfrKPc4htYB5vO5L5pjjMsA+MwPfAZ/6Wd+GpUpkRc5yrKAAeutW0SRMzCkCjudDo5PT/FjP/4XkOU5JpOJ6yra7SBKYug6bCEFf+v12gsvKYRUSnmPi43TiNxkG3h6bkVReCEeQx2c9GQpqGHQWvswDL0obmwEHTLEsVwu99JR5YMC3GbA65ffZbYK28fT05P1AkjlHx0deWOy2Ww8M0BGgdkoLBJ2dHTkQydkhihu5QRkZUopHozjGEXuxjarjXtVVb4zrVQvc5K2Y6j0qpMk3lsE7VRgTmZmHoVhiOFwiNVq5UWEfKZMQSajsdvtMB6PPWAhMBgOh/46+f4kSfDOO+9gsVh4JoisD713Mgq8Phle4v1R+X9zc+PTbZNahMlnzg0hilxX2fF47MGNy1BhQagDBkDMG2n4ObfIjBVFU1lUa+1DaNIIkd6W56fegoazDQL4mVIUrJTyKaay1wrDltT4kDkC9gtlSYMn5400HBa2ARB7XmITkydQYhiPYFVunAT1HC9qvpyuI8SnPvUpFEXuDSQLV+2BpTr0wWfAyr5tVsOvc4ZRDhaAssizHPP53PVgUgqltVA6QNLp4tFLj4Ga9WrCDjXIEL+RB6l4rv0sy/D8+fM7jfQhw8frvvW7A8a5qprMqrZ+4FAmS1HcLhTGZ97eI6RxPgQW5Dg2xvpwqmf7s9oMSDvkK9lDXnv72XEfk2z5XUBDHvL1h4CPfJ187aEQjbzPQ2CqPQbyPBxfGSI9BKTk5/Bv0iF4P8eHykZxBq/wmwl1AA1l1oEOFF776Ov4+b/28zg5OcZmu4WxBnmZI4oC9AY9lNYgjCOUpsJwNERW17EoyxI/+IM/iHsPHyCvHz49bhZoms/nAJzHS+NJ0EAPh14v6XWmrkr0x83B/T7wmyS9ZwBYrVYwxgmtjDFYLBZezEhmot/ve++chohsC+A2NYrHgiDwWRo0aARQm83Gg6HJZOKLcHFSS9GjnHBpmnqvnQZZ6g46nY7fQGnkZrOZN7IcXzITADxjYIzxOhI+c7IwcRT7sA9BI0NDrLoqJ6hczGmaYrVa7xkhjt1isfD3Y4zB0dGRN04M02itfUrr06dP8dJLL6GqKpyenuLi4sIzYGRnyBrRaNGj51gSWPBv1loPGqX4kSCHmxOvk2BmtVr5uUfPGYD/nmUZnj596r3re/fuYTAY1DS7ro1ZfVjrM1bk8SLPlECAwuh+v++fKQ9mqNCI0igzW+XQpiI3K0l3y5AjDTyBKhlCAmPJZtwVA+Zcl14Tz1EUBcoD+gC5NwwGA/9MpbCX83EymeD09BSAc1TodDx4cB+vvPIqlsuVv152/5VCR3e9DbW+2+1cV92tY7KCIGgYDD9mt1X+nE/b3bYWcu9cmEUpVHDsxr37DzEYjVB6EFMDnjuhhvurtfves2M29sM7h4zS/tdhr/og2BDz8xAzIEFH24OWa78NNOT8OGQ0D6+H/SwpyZC1v9qfJUFHG5hItpbtCvZGXVzvXWGZ9tEGTvL9h65XMhJk3Tg3ee1yXRxiidqHZDV4LXxm8plzf5PPvQ2OXnR8CLChaoMWYHp05Cj+7RZpusNgMMDF8+fIiwKVMfj0pz+NN954o75og16/hziuW4BzMAONOElgrHUFtLIMVV2t83Of+5zTEPR6gGIztiZcQh0FmYejoyNPGXtkXVXeMBI0LJdL/345uQDljS4BCT1+a603RqPRCGVZ7indaTxJ0/I8jGXTgNHro5fFBcJaHgQ61lrP2KxWqz3vgJ8LYG8TZPjHGLNXHt0Y47vPkmJeLpd7qZsEUDQ0slaEXDz0SDdbR0VXZv85EGjITYXeLxerDF3JSp/0bOhRyusKgmCvdL0rLx367AL26qBGgIzUbDbzBp/PgQyIUq41OQBfb+GontMUWpKtkrUl+MwZ1mpYmsTH8slwOcan7++Zn39xcYHNZuNDGQSR3DQO7Ait/96t3OdmoOrz8vm0NTE8DzOfKGyVBbzaFCwPzjnOP94rq75ynfGQ5yBLRON3aGOU3ppkbaqq8g3FaMS5OVIEK0GeTDtXSvnMLzoKrty8m4cf/djHEMcR8jxDKNZqkTdsWxBoKLjUV2OMc6BqoFPkDcvjsQZuG2ul6oCKatJmV0uXNWaVggoCVJUL4wxHIzx+6aX6vcxEafbiO3dpLzR0hofN8Q4ZxEPzTQIkee2HwMehc7UdDOkFNx7y7b2l/XnN/dxOKT0ESFA/mzbD1DbY8noPgYrbGTSu5D+PIGxK15PxOHT+u5/P7ZRo9/vDGSkSMEjNVPsa24Bd3suh+5asxV3X3Aam0gF5ETN26x7e16vkoWgsQiRhhH6vj2
yXotNxjcMmRxPkVYGPvvEx/LVf+AUknQ46vS7CMEKeFXXmSoVsl6Pb6SMKY5S14dxst+gP+n5D/7Zv+zbvIVbWeDYhDENMJhPvhVNncXl56QWfPEajEW5ubjx40NqVyLaAp9flApE0q7XWsxPcsOgNS0Ehe2GwbDm7l2qt9zJggP3NWyJj6j8A+DRaChfJ2DgQQnHQ/mLrdDpes0EBJMeDTd8oNOW15nnu49aTycT3VJFhCC4mSctWZYlupwNTVQiUQpkXqMoS27XrCSHLFcs4nzeCe8BD+2dDsLHdbr1RloI/GvPBYOCLsvGZEmDleY6bmxsvsK0ql54tdSEUezJ8RSDIBUng0E7hZcM8NmyTaJ9F2wB4DQyfe1E6I0T2iwXCqAeJogiT6RRn5+cYjUdQgQb2SjRboC6N3P693AhuLW4BAqRhp0EkmwE0Zbw578keHaJSD3lb/AxucgSN7bCIe+aBF2nKc8rNVHqkQRAg1HU9DqMAY2ENoOAy2Xa7HWAVjGERNrc/uSyZXY3TXDXVJOlCKY3nzy9xeXkNl0bryql/7GMfh9JhoyfRLqTKaqTuWnVd2JBF1xSKsnQVSI2Frks5N+mu4nGhAQBQBloDcRICtsJ6vUSa7qDhamiYyt1jJ+ni0eOXoHUEa+uGbrgNAG5v067RW1U5o77bpV4A32aT7jJ87poPZzw0pbp5voZRkMXCHNPjegKhThxwIQr3dYjtOAQ05PyQ/z8MOG4zILL4YGMwGw0d1wefPdksuUdrUWejKg8zCLcBx4tBh7xXxyTxy9agydTAliXEyRZV/rXsheSSBBhadK0mwjDyP7MaKGcI3w8PfMmWuzYIzvEBqopjGOy9731ijGb8PtjLG0FUpANoKMxvbpBEMdLdDnlVYrnZoNPv4a//rb+B7qCPbZbCqgDrXQarQ6ggxnKVIcssqjLAbmcAHWKXbZFlO+RF5kSjwwHO79/D9//AD0CFESxcUZvVeuPBBAt30TNku3Z/c1pjsVoiTGLs8gxZWaAwFSprEEQhNukOpTWoYBHEEbIi36s6KitaMvRApoDVKGVMjd4sQzFSwGltU9WS1yZ1CRLdO6EYUFUFoigAYBBFAQaDXt2groTLCDLIsh2ybIerqysPNAgMHj9+7EWhy+USk8nE1yIh5ewqFrosFja+GgwGvnneZrPBdDpGmu7Q63XQ6ybo9Tro97oYDXoIABxNRrBlgVADnSiCsgZVkUPDohNH6MTudzAVAgXAVO5nrTwzwI2A4QyGgDguzB7RWnvmQ9KNZDZoXJk23O/38e677+6FtxjSovfdDvUQxFZV5UEEnye/VquVZ1qYEsznXVWV1w9VVYUoDJGlGdLdztVzKEuEQYBOt4NdniHpdTGajGGUxaOXX8JkMhZ9LFxxPAQKpipgqgIWFWBK94Vqj37mtXNMOY8JishsyNbt8/kcs9nMh1DuSlElGGwDCIbMmFpLBidJEvT7fZelprVPPSUTQUDD8zPziO9lVVGlXHh2PBgjCiIkYYJABdhtUuw2GYq8wmazQ1lZ7PISWVEASqMoDACNNM2x22UIwxgunq9grTMEQRDCQqPfn+CNj38H8tIi7vSxWG2QV073kmc5yrxEmZeAqQuPhRo6DGCVwmy5wjbNYayG0q4nDAiwrYWrpq1qoFRBWQOFCoEqYcot1ssbGJMhihS0qaDyCqENYAsFrSI8evgq+r0RSqugghBKBSirEq5Md3vHdwJSYxRMBcRRB9ZowAZ48q13YCrjn61cP23D7Z6vgzbuu/Hz0VSu3Dn7qZiqQFkUYCl0ltx3+1SOosjgyou7MuNlmXlAQiaU4IQ/t+ca93nurbJCLZ0azn3Xo0WDJczDUDlGypczr1AUWb13pntgS/4stRJRFO2FNyVLwy86UNLzD4L9lFK5z0hbxfEH6r5BVtXGXNfP09bAUSEMY4RhDEAjy3JsNltkWQFjHPBwoMFpIF0doxJVZcAy40q5fiZBECGKEoRhDGtVDVJiWAsURYWytABYsjyoAQaBSfP+OO7g/RwfXCCqFEJS9mmKXreLs7NTKOV6E1hY/Mif/xF84hOfcJ5pXLct1yEiHcKWFr/yT/85/ut/+QOY0sCUbmJRS5FlGcbjkU93/O5PfQqdmvIOowi7Wrw5m8284aHRlKGFqg6fcFHREPABk0qmAZIxa0lRkWIG4AVg9OQYE2f65GAw8KmWsh6BTDMkWiZ9yrgYFxnpeoIcLjz+X3rdUnh5dnbmX8fMg4uLC/R6PVxcXODBgwdYrVaYTCY+C4UahjRNfb8QAL4VO++/2+0hDJ2nutlsGnYiCJHEMbJdijiMAGMRRU0BNI4Tx096DXx29BDkIiWYYKaKn3f1/bIwGUHfYrHAcrn03vTx8bFPgX7+/LkHIxT+shYKtS0SHMpNTdYIkM+L98RnIfu5aK19z5vValW/r0RSv4bnNjUjttm4EtW7NEWv33ep4EUBUxTwLrFyXpUO2KNAA4GGCgOErTTCNpUq74WMHY03wx5ksBgK5NyXHYN5cG63qVV+DoEOwR8Ls7VV7nyubc+U1ywpW4ZdAh0gDEJUlUGW5d4ZyPO6Wdx6g816g+12hzBstEd81rw+axuxsFIaVWXw+KWX8fDRIwRB6ENBy6VjG9ya3K+Y6gptWVTG9XQhO9L2ZpXiPYq4uQIsXNPKssixXq9QljnCUDt3sXIp4QoKVWnR7fYxPjpyDUSsggo0dNA0HDu0R7tn1fTIKUsn2C/rtdumxQ999z8Lhqsds29CJJUPLbWZD2dELdjb5K7wjQQ8bZpfptzK1zvmZF90zn2Tn+fmbA72JGquzw2pHLO9+7Z2b27K0Vb6dkaHvEa5px1iHiXbu8/AyDkk02D3U5v5XjIfzXkJTCrkeYEsyz3rYep0a/n5/Ey3h1DoeqiHSjOHlWqcG8eUvD99yvurxiEHqSYIB/2+L1H7/PkleoM+yvUKR8dH+Ms/93OoqtKVolYaSRwjCJzx+rVf+3X8+q/9Gj7xiU/gsz/wWfQ6XRjkGAyG0ACs1WCb5tFojDc+9jGcn5/jrbf+DIN+H2EQeC0BxY5M1Vyv15hOpx6Vbrdbv+HQOOR5jiBoWr9L4ZwEAXwYBA2ccEz1ZJVKeQ0EK4y7M6uE3iVDMcykkUiYmz+1BDReQFO3Y71ee90BM0C4ObPyJwHH06dP8eqrr+L58+c4Pj72FUKzLKsbwW18KijrldDzvby83PMc3n33HUynU6xWq73FTDCy3W79uFZFiX6vh6zWOwRBcCvTJSAbpOhxKA+q5CJUSnldCYWu6/Uap6enuL6+9u8bDof+GXTrPj00tp1Ox7MiAPZqlLCgG6+Tok8aSM4HGmCZAUUWTcZrZc0OfiW1HmlXlHtApSpLrNcbdG0jqOwNXJhlMHSC4Wy3A2oDyTHhXOY1h2EIVS/j9ubN13Q6HS9ANcb4Kq0UFRNESUGYtU3aJw/+js+Dv2vT2Nx0GUaiWJUp1HL8DhmPuAXMCEwYrpQCX2PMHgDmOAVao6q1PWxjwM8ig0P90m63w3d853d6EbFjd
pqsnKKofO8PjikzhsqqhKmf6y1QwfvSuskjUTUAUa6UeZoXWK0WKKsCQaBhygJGAUXOKq5uD7n/4AGeXbyLqiwR6LZG4nCMX4ahyrLEfD6vQ6xjDxgkIJVjflcsX4bD2qEc9/PhzId9oLZ//W2jLc/Xvg4JKhj29I5L7WASVEvnUjIL8l4aYNIYe8lwyPe214K8tkMahmZfu5390b7XNvhof/aLjvZ5ee42eJTnlJ/NOXLotfKcvE/5f77n/RwfKhsFUOgkHQz6AxhrYZRFURbI8hz/1//b/x3n5+e4fH6JxWLhMwLCMMTl5SX+8T/+JfT7fbz55pt4882vQClHr8ZRgt0uA6uRTiYTlGWJ8XiE83uniJMI89kNRmOnwaCnKkUwzOJgHr1MZeKkoic6Go08zU1WhRkcbfElvbSqqnxKoMxoodEiOOADk3UzpC6BhpliTGpHZCYHa3pwAa1WK8+acPMgsOB1MCOHmhaOEz1sevSLxQLdbtcbGtawoDEhkCDjwvAMBbJMGeX1kir3hbasRb/bRa/Tga0qDPt9JFGEThxDWVenIAwCxJGrPivjogSAUlyaZRmWy6Wvpvr8+XM/p8hEMdwhhYes6sr7IqNCUEghaBiGvmZHv9/39w7AG2CZuUJDyoXGz+NCJLAEWLY+B9CkyYWho9n5DMfjMY5Pjn2xteFohPPzc0RJAh00JZXl3KLRA5p4NLCfPsjxTJLE18EYjUY+jMbnzxoRZDx43y9K6WtvvPxOseRsNvPPkGnKfE6s/SEPudnLcJTcwGWmFA9ZuItzmGJwPjuGZuSewA1yu91CK4VPf/rTglEEVqsFioKptDuUVeENh9YatjaqZVE2qe+Bvm0chEfsjJvyLFUQBMizDFmWgnoHbvO8lrx2fM7Pz2vNUnmnEdn/2MaAk9XZbndYLpa3POm24bnL4Mv3tf/OuXnomtqZDm0mq/2Zh8CKBNpSgyTTmAmsWfWX72vPV8n88brl3ON6a4MjeQ5T7TM9EgS3AdSh9/O80qnhtbQ/9y7AIe9D7i3yffI17efK65YaKTkWh7Qo7fcfYm3uOj5EUS+LbpK42LMxWG9d5sV6s8FP/MRP4JPf8UkY6zI2xsMRoiDEarHEdrPFP/h//QOslitPjf7Ob/8OyrJCHCcoS1PHVG+r1j/72R+EUhaT6RhFLdIkJc6NnRVDWWPD1tdAj5WFv9obJidnWZbe6JI2lxOID1G+11rrRYxVVe0VMaLXRK+VsXKCj9Vq5ScXi1HRozamqdlBMNTr9TCbzQA0KZRkYIxxokNWAAWA6+trAPAhkzAM8fLLL/vrZHnvJEl8Uzitta+VwIwX9zfH3nCMjTE4Pj5GGIb+M+kVHh0deS0Liz2VZbmXBkzj4K59vzS11AgQbFDMSKAoNw+CPIbRyGpQzCs3KhodPuder+crG7I+hPSq+TnUElB7wHHj9dGoyfCKtdazXta68tG8B44hwet6vfbPudPpYDQc+XLqvO48z2GqCraeGwxXuEW/711wE+M1rddr79WyrDZLisu0RDmvaZzbG4z0LqXHJ8NmBFFAQ9FWVeXrl0hjzzUg1yX3AL5X0vb8PcdHhpDaz63T6UAp5QElgSCvneP62uuv49VXXkZR5sjzzDMai8Ucm826bq7mQEeWu5CK36yrJq030E2TPP8s0GR2KGWhA+Xj+GVZ1OnwrOcgwIxtuuPmeYbxeIz+oA/4e3zBHi2en5wTZVn4SrLS0EmgKr+3jz1jK55J8+xu17NozxuZHdHOkGiHaeT7JKCRNW+kc8TQeJsNkWMpgTr/Jh3W9nxvz28/xrgNvNrGWgKJ9vv5fNqhdfm69jkPPY82kJFMgwwrtZ8px/hQETYAe/uxXF/8uT0+7+f4EOXKXcv4pGYWJpMJDID79+/ji3/5LwPKFWZirjtTGH/vd38Xv/Wf/hMUgM16jeOjI3zlK1/Bn731ZzBlhc1mi+lk6ox9FGO9duW1szzDxz/+Bl555RUAFmXpNovlculpVCJaggRWCyXA6Pf7fuOmCt4Yl+2w2Wy8N8zBlpOfmxI3UApIWUOARcxoLJjTT6aFgIEHdQgEPoDrMGuMK/e+3W69wWV/EFkvhNoNetG8xsFggIuLC3S7Xex2OxwdHQEAjo6OvCaD7eWVUr7yKpkZeoxMV+Q9XF1dAbC+tPd0OnUeWR2yYKYL03azPMP11bV/TZqmiKPYizaTOPGFx7qdDmydQdA2LMz24HlooJhNQqNFen48HvsUWGprmDHEDUouaHa1BeAZIWazcKPiQmUvm5ubGwBNQzMKLwli2iXr6Y07JqMJ0xljYOo4Mg3+cDjynYaNcfPj5OTE17BpGwXpdTArRW6w/HtRFLi+vsbbT57gyZMnuLi4wGKx8PNZerhyvNpeFTewNtiQG5H8zvUfhiGOjo488CewYHVVuenzPmkQCC6lUFgptbc5SzpdXqMEJ3yWdDp4P8wY+/7PfD+Udm0KNttNzaAVHmzsdltstxtsd6wb0hR7klS9vSXWRM2AlKD2Rqma1dAuTLpcLWCsK40OWOig2ZKp40pTp1GaTKbQStegZj9+Lw/eP9CAdt7zar3a87zbTEObdTgEQCQokMaNn8fv8mfpeUujKsFtm9Hg5/L5tRmAfc2I8cwZNUhSp3SIWeG1SeG3vD95DfUP/nfS22+PmQ9vKqdvOMRq8BxtQCIBU3ucDx1tkN4Ge/KzD4EZyXAQjHGNt4GMXG9yrUtm+kXHhxKINkjSpVVeXV3hpz//ebzx8Tew2W5crwfVxJoXiyX+yT/5J96zoHYgyzL8/u//FyjoGgwoJ3C2zOPXMKbCcDjAd3zHJ5HnGZJOB5vNGg8fPvQGj4aI3qesAMpJJrtUcoA5GWkg+KBp3Jh2yckjMyDoWfH/xjjdBbvNsgARGRJOXp7Xp9cFTRaBpO/onct4JIVrXHxy0mRZhul0CmOaehfMNJhMJpjP54iiCFdXV75HzGQy8Qae3WLH47EHida6suEsCkVvndd4c3PjGSGgBnJKIwpDREEIDYXJaIw4iqChkEQxet0uOnECDYUojHy9A8BtrtR/8PnwuVDTYq3dqzOilPLAkowXx0RSr1wcBCRRFGE4HPqaKDc3N3tgU2oSaISpOWChMAC+Zw0XY1U1qb9Mv1VaQdcpZwSJWmsoKNga7GV1+nan00FZNY3CxuNxkyFV3a4d4DbTwwwDNw0AyNIU85sbXF1d+dLqMo7ONcI5y7XRpCg3nqP0cqQIWynlxcnstcLnFgSB76zrirmtvFdJJ0GyIBLA8/lxDXOcyRzKjZ+6GV5XkiSeAeWz5fMMtNOTffLbv70u1e76qeRFhtl8VgvrSjCTwl2HU+1r3RSAahimA2WrjUwZdvWJ4jiCDjSKMsdut4XWygkO9X7qLwXUReH2o4cPHkBrx4zIeX2IrpfgVBrZxXzh9zuKgO8yhm3D3/7MNn0vgZ/cmzhXGzbzdqE4GRppA51DAIEOhNRHSYAlgRAdMglK5ZhIzZIEj7wWv95e4MFLsTjnZBtASUDCNSOvg3amzSxKMN5+
FrzG9ni3n5+cG222qQ32ZQidjrhPCmg96/Znvuj4wGBDa4WTo2Osl0t0kg7KssT3fd/34qf+0k+hrCp0ej0Ya5DudqjKElVZ4ld+5Z/h3Xfexfxmhv/u81/Ad3/ndyHPMtjK4I/+8A+xWKwAaGy3qafbyQpkqYu/ftu3fwJhGGC32/pMhOl0CqWU9yw5aWTJZHpwklKXC1IiWk5Ixq1p0Ljxyk2OD4ibGB8oUaKkgblBDwYDv/ny83ndgAt9MGRAAMDrZUhCNtHigzbGeKaCnnm36+qenJycYD6f4+zszOtAttut72VC40sgluc5VquVj+M74FV5I7per/1ipPc6nU5drYjJxD87gjQKWqkJoIGk122t9ffEe6XxZoiLn8drZiiCvyOrxcXCxcCx5sEKrtzIOVaS9ieQJlDlopxMJh7o8HnwM/k6Pk8yRtTKWGt9jJ/zxM/BMPTsw7Nnz5xXVhmvmRgMBphMJm5OxzGCA5knEkDLjZcbmdYaKgg868geKHJsCPAYIpLrgplW7FQra2RI9oBrh89E6mUIZrbbLRaLBdarFZ48eYLLy0s/Bzj/uEnLzZ/3wvXH85MF43iwbDr/zmfCsWfmGgBEcYyXXnoJp2enyGvGoijy2jveIi9S5HnjpGgNKN3UNzlEtXNd8rCwCKMASlNEHNWZXQU2mzUqU0JpOs3WpZOiyfrJi9zf+3g8Rq/X9+DNPf/3v3eT2aCmjXverWsWAIDPoB1m43slM9FmH3i0gQOv5ZAnLq+1HT479Pq7DKpcF/ybfI88+Bnt+dYGPXK9tIGltCeS5SYDJUGLXKvSyBNoS9H5oes9dMh7k4yIHDf5LOX1t6/t0GvkOHBtSrD3fo4PUWeDb1TIay3B3/sf/ycEUYj1dgMVBsiLAkFdIOSrX/0q/tW//JdQAL77u78bv/h3/y6+93u/xxe9eXZxga+8+WadXhmiLE39wJwOoNfvAcrglVdewvd8z3fX9R76mM/n0Np1NDw/P/dei2QCAOwxGjQwMkbNBSTj7fTmCArI0NBQ0XOiEIlghJ8h0T437KqqPL3HhyopKK31XsvrNsrmvckHTUATx7EPcyilfJdPCh+Pj48xm832OslWVYXFYuGND1NCZVdbeokspc6KptLz7Ha7WK1W6Ha7eOutt5y3G4aIwhBa1dUXiwK9bs9rDlQ9kYqsQc2MexOIcBxoLDgest5GEAReICqfIceLz4ILiM+Kn0egKJusyTlCIEhDSdaIxopGkoaS4ycLwXFeWqGr8OGUuvrkbrfDfD7HarVCEAQ4PjnG0dGR13hMp1M8ePAAZ2dnPi1Z3kOzNve9RKlx8OwDmpCMMU31S44tQ0a8L7IXkiptbzyN596AcckIEgyyb4m1FmUtTL65ufFhHW6uEkxJWlp2TeZcIbjgWmPNE64FrpV2arkxrjrwt3/yk05/Zsta2zLzz9G9NkdVOaPh2SgB7mR65yHNBgCfrhyGGnESwlqD7XaD5XIBU7lMl0bkqzyIqCpXLdXU7Eiv38d0OnXzfa9Pyvs/tltXGp3Gt+01S2PFdSRDFpJql0ZUGqZDAEK+n+N/l9GT49s2mIeYlvYXD3kP/L8U1LdDQc0zbd7DOch1JT741jXIz5UsTXus+B75f/7cHl++9tBx6Jz8/Pb9tIFR2+GWrFR77OQ84b3J/jGSCXrR8YHBRqA1orrba57n+MxnPoM3Pv5xZFnu4+DOgDlh1S/90i95+vTv/J2/A1MZ3Lt3H6enp9jWCPuPv/wVwCr0egO36debnAsHpHWRpwKvvPoKlALKsmnANhgMMJvNPG26Xq+90HKxWHiaU25cbSZDbqSH0J9kGwB4A8jYPRGeNOSHFhELGtEjo4iRtD5Bjcw0kaImoGFbaBjI7EjmQDaRs9bi6dOniGPXhG06nfqxqKrKU/fUL5Ca5j24FF1n1KMownw+9xoSZqkcHx8jz3M8fvwY280GcRihKkrEYYTFbIZOnMCUJZIoRhLFiMPIh1SYISK9J5kqS+qO90nDRM0MjSLQbJQU+BEMcPFIECD/xoUnGQqgaSNOdoVsUVVVPt7vDU0Q7DEuBJ8APNDg/NG67n9im40hr8Wt0pDwuZNtODo6wtHREfr9vgfEnI/So5QbhgQDso29bFpmjPFhH461jK2TUmVRLhmOlIekqOkZUnN0dnaGe/fu+WwljkOWZZjNZnshQrn5SbAhN0TZ54eAg+CYIU6ykZzH1GxIVuz7v//7m07U6xXm8znyPG2eF7UU3jE4HBtvANhtzUFVe6qSDXMgcwsL49kJBzj4PmoTJK1vcXJy7J6J/5z3R2H7a6kqDzYkA8D5Jr8k6yFZhjagcq/HHpDg53HcDnnpErS2P5/vf9G9yPdIQNRmYmTIog0yeC6eT16zNLC3mYx9RmgfgOKWXZH3zP2uzX7zGtvsUBuwHPo6NIZ3MS9tZlSuMRkm5R7A89DGyfV/aB+46/jAdTa01jg7O0Oa7vD6R17Hz37xizCmQhgGsEBN/fYRDQb4l//8X+IP/svvIwxDfOFnfgaf/LZvQ6fTwUdefQ33Ts+wuJkBlcHbT97GO2+/jcePH6MsK8xuZojjCEWRw9Q30u8P8NGPfhTT6RHy1IVQKOyTVJS11hsHGX+SmwLQqIHlQLZZBykw4uDz73ISc1Nj7Q2JlOXryUJUVeWBBTd7SR9zQ6KnLTd8ThzeAz9nMBhgs9mg1+vBWke5MhPl6OgIz58/x2Qy8Z9HQadSygMGgjemuRIMLRZzHB1NvS6EGzZrV9zc3GA6neL58+fodpxBJzswnU7x7NkznJyceNZJshFlYW4tdsaSyZwQAHBRDIdDz0iw9gczE5gFRE9Y5t/zWchx7Pf7PjQiG5DJDJM0TTEcDnF9fY1Hjx75QmGe6q4zojhnGKZiWfUgDGFts0C11j7+2xRMcuBzuVzCVBXyrGEseG+NUU2QZWnTmKwwKAp1a063PSzPjNXgmNdLJsCNESsV2r0CX1wjEvjyvPwyxqAyFbTSyIvca44Ap3Pp9/uYTCfo9/q4uLho5n7NrimlPJtijNkLPRlrYKqmXg3XjNQ4KeWKpQVB4Bi40vUAYldX1PsTx+fRo0d4/fXXURqD3XaLLHVhE7cWS1CXYeriUK4ipkJlFKAib2R1Xd4ZCrXmzHoMoOsCSL1etw7vBihrrQbL2EOAjaqqha5aAcZlu+iyRBQFKPISp2dn6HS6KLIN1Ad0FTlnV8uVd6I499oH56qcT9IwSafKPX9XvdJV7VR7xuwucOZDjHcYrPbcknsEr8utUwAgwGVpdAcIuGc313k73MX5znO3P6O5PqHfCDS0JhCxtWFuwvZS5KlUwyby/GQLeX2yCCRtCP/fDkvJ8WmzUm2AIUHeXcBEjo1kdLhW+JwPhVY+yPGBwYaxBgUqmMDiez7zKXzkjVexWq/QGwygtMJo0EeeptgVFf7h//r/RtJxqYU/+3M/h8paXF9dotvt4Tu/+zvxtT/9GjabNeJ1gnfefhcP7z+EVgHSbIeqrGrj18dyucFg2Mf5g0euymK+wnw+q1M
SUxwfn2A2m9WbcgTUZVo7na5nOzgx6TG7yeEWiHsoAcIwqjdU43+ndeBLvkJQl9Z7pU39eldV0FXuA1DTnwZFUdYTXHm6mkaPYtLxeIzr62ucnp769vV84AC8AZMpfJwkNLD9fh/r9Rpaa6+6p6fBdNTlcolOp+snFXUcNAyLxQLj8Rir1QpHR0dYLpcYDgdefMnCStZaf21JkvhGZwpAlufI8hy9fg/r7QYnp6coygK2tJ458aXhy/2qkgSJXBwSABKEMH2PWUbUAhDI0ZASQPCZy1CZMcZfNxkMFppiuIy/Y4YL++ywIJv0ZCgW5gZB5mowGGCX7gBroGARaAVTTyNnlBRsZWAKA6utK7+dlaiKArYe2yiMoayGURbdXhdxGGO3S2BhkeeuiZdCusfIhUGAog5ZecZAayAIYEwJayoorZDEITpJXIcBABcZcIY9ywoo3aSCy/uVHs+edgSAgUVZU9bWGORlgc1u64Tb3S56wwHCm2ukWQqrXfv4vCywzVIUpnJZSgqojKteGWqF0hgv4DT12jFVhYBCaWOBQMNBFKf9sHXYrqqvo6wqBEHoBLhVhTc+/u0wUNhsUqxXa5RZCVsYVHkF2NqgBC6DqtftIYqctqmydeojuEGHgNWuCr/ZZxzCQKHf62I8GqGTRCjyLbJ0h3S7hlIGYeCyiQIdQFmFQAEGdR8cSxGqrbUiGp1uH/3BGDdpBg0LhQYoyIJibcPiKku6PW0+XyHP8lrroryxdXsX4FJY94vISSN9m82hg6ZgTJMxJIEGGUEa4vZ1tkENvxojrUmIifsyYCVX9zdqK+h8ynMF/v9trZPW+xkdbSMt7785NFwVUhfyaippav/s61XubY9kMaTj2mawJQsDNBWoafC5N/HaJAiQjFSbGZKArf07vr7NGMlrbIep23PsvY4Pno2iNbb5Dvce3sNP/cxPYr1bIYpDbDZLwFawRYlQh/j1X/sNFGWF1XqDv/m3/jaOj4/xb//tv8VXv/Y1xEmM7/v+T2NyNEF/0MflxRW++uafYL3eoCoNBoMRwjDB6ek5ut0B4riD7SZDp9PDy6+8giBU0AHRtNv4R6NRXa/AYD5f1PXgXQMilmwdDIZA3XQGdQlfY9yGnSQdGGPB5jXWAlmWY7dLkaYZWOqVoILNcaxFLaA00DpAuxlO0+CmAQ/MuqD3PhwOsVwuPTsxHo/9BPSFnupQlKQzOcEomKNHz98xG4CC2eVyidPTU28s6S1zI7DW4uTkBDc3Nzg9PfXhqUJ4nuv12tPXjPcTgCilUBmDNM/w4NFDVMYg6XSwy1JEcYztbof+YAAohU63i63odSJThoMg2NNMSFqPTdTI+vALaIAVPQNm2BDgbbfbPYqU9RcY65eAQXrzZKxY2psbLs/BEIPcnOlJ53le6/5cF1atlPvS7ruCBowrEqStRlVW2K63sMaim3RhK4vFbOGMYVFitVhhuVghiROcn55jNBji4YMHeHD/Pk6Oj9HtdKBVTd2Lz2OSpPtyPS/CQCEMNBRM/WVhqgJVmcNUJRScR09tCDc5uVHteUxaQQcBKmtRVKUHEPPVEtfzGbZ1L6KLy+fIygJhkiBKYqgwcGCiyJGXBVQQIExiRPVXEIUwsDAKUIFGWZUoygKlqZCXBdI8Q16V6PV7GI3HMHBOUVWWyLMMpjKoSrdetmnqeqEMxvjMZz+L5WqL2XyBdJ2hSF0PFGssNDSUVQh1gDiKEUcdaB2AEQ1jLQwApYLa4CkQI8jtN45C9Htd9LodKGuQ7rbIdhsU+Q6BdgBQQSFQIZRV7ue6aRlTaQ2AojRQOoIOEkymp8grjcIoWAR7exp/bn/RQQqCCOv1Bttd6o23DhR04MCVUvtG5lDMfy8c6I3NfuVR+VoZ4pDna2vS2swH9yfHHIe37sn1t3EgxJXQbpqNNY6fBft+8G/Wukie1gGiKN4Lf7b1TvL6pGGlo8lrpdPJc/P37TAUjbasayF1JIcMOz9frjn5fNp6Fvm5kplph1Xbf5evkyETY8zes5PA8C5W6tDxwcEGgF6viy/+3M9iMp04irDfR1VZ5FkJaxX+5E/+FH//7/997HY7fNd3fRd++Ed+BH/69a/jV37lV/DvvvQlLFcrTCcTvPLKK7DWxa2+8fVvYDab7aUOAcyvbrrWffrT3488zzEZT7wRfvvtt32MnoWlrHVFdpKkg/F4jMlksicipagMgKfbaSRIMUpFu0SLfFgyh7v9EPggiM7bxVXk78lGsIQyK33ygVMvQtQqY2s8p9QpaK19BVZeT1EUvmcIa3AY49IuGfZgTY5er+c7pwJNbwkZV2SxLXZAZcgqiiIP/Pj+IAh8X5b5fO5FaozZs2YFw0z7m4zeW4Bs0CX/xnHgNXJcOWb0AqRGQ2ajSMaIHVn5nOiNMbTU7zfZANRxEMxRrHmIrpWCy30vomYDTLNoXTjDFX5jCChNU8znc6/NWSwWeOedd/y99no99Ho9HB8f4/z83IefZFzYAS0aHlMzg5m/bm6GnJtB0GQO8XdtsZy814hZW6KSJjfUIAjQ7fX82EdxjMHQ9RJKkgRQjTiOGRjUtpRV5dNyd2mKNMtQGVN3W63L7IcBdBC49NH6XMvV0oPktrjxu77ru3B6eobZbOab0Hm2sGYstHcaXF0LZ4BqI2RborjayCg0Jfjdc4/Q7facWNrrXpowolLqltjTogYz1sCKdumVcSHi8/N7bv5VTWWP9r5z12GtA8Cr5aoO4VV1GMVV9pUe611eK59t2+mRBrr9N5kayrUmDaDUOkh9jjR0hwwr52BbdwA0glA5p+UeTQdD7mvyM6Rx5fnk83br24E9yTrIZ8nxkmunzWRIjYYcM8mmyJDK7fDO/hgcAhBtxkM+X7KwXGMSdLzo/RKYvJ/jA4dRrLV47bVX8X3f9317VHQs+jf82q/9Ko6OjnBxcYEvfvGLCIMA//SXfxnX19cYDof4s7fewmuvvYbPfOYz+P3f/30MBgN861vfwrNnz/DGG28gy3b+oSdJhKIwteLf4JVXXvZCLTZkG4+nPqPCWud9syTzcNhHlqXe+Gitfd0C0u0APNVOJoGLA4DfLOXiUEp5cSMNQHsS8HUuJTACSw3zGmRWCVuoU9zKug7sh0LWghOO5+HDp5Fl75MgcK3aT05O8K1vfQv37t3D5eUlAODq6gqj0diHLFgwjCWmlVK+/oVLv7WeYZCt1Zm5EASBR8Fc6Az1SIaBf+dYU/vBBcNy73JT2I8LNxUgyRx4HUJ9bmpC2MeEZdu5WAD4589nRDBHhobznECkqir0akMJuGJrFMqyaimrx0rD5jOK6vOU9c/WWpGdQhq48VgcG2L9mJVl6VkXZqxwww6CwJcg572wcBzDajyH0zswdl/uiZQlmOX4czONa8BNfcohY0bjXFQlbD2HtBtIBEqhm3TQ7zsBaqA08jxDv++6+xZlgSio16dSyHY7BEohDBu9VKAUCmNgq8q1Ydeq7qUExzyEEWoOHwoKu+1W9BfZ+WvsxB0knQ6+93u/F9fX115cXaRO/+IqtNabv9
oPIdgafBhjYUpTe9TYmy9uTjZhgjCKECdxbWRcS4eidOEppTSsMo1dsk4XYBmFUQ2Acd03FaIowfHJCQaDARazG1fHQ93WVhw6pKOwWCxQlK5ztFIGQahb738xPd7eC124YF+4yXORlTxE1Utj1TZokmWwdr9niXT6OGdptOW+yv2DjiHPz72gnfUnz8W9oLnWfXFrGDb3IfcpGW6R1yifCzPYpEMA7AOatrPA+z4EvtrP3YN1JcNFtwt6tUGFBCK8JuloSJ2NBInv5/jAYCPpJPjJz/1FTKfjuuqeU6ifnZ6jqix+67d+G//+3/9v2G42+NznPodPf/rT+MpXvoKvf/3r0Frj8vISs9kMnU4HH6ubrF1ezbDdbvHVr34VP/RDP4TtdoeijunT89Jao9tLcHx8jO/+1Kfwh//1jzAcDtHr9er20S48kSTOoytrgHAzu/ETnQYewF4uMx8AJxiFazIkIVFgWyQlEa38m/QA6fUzfEKD0W4Ex2sj00FNQpIkHgRJpTVT/OidsvQ4i1w9efIESZLg5ubGMwkPHjzA9fWNFyWxpXxVVTg5OfFFnzgpyzL3hb1kC3BORikkcoasqp+FA2uTycQjZwlIaMC4GclxoDhRLmBuAqwMS+MnKVCKRSUQ5sLjNW42GwDwgI5Gl1Ut5TO31mUPkX1iYz02pgPgRabMNpBzi2OT5zkKQaNaNBofueAB53GZqvKgSZbl5zPh5sYmZBwbMk1MNeU9NMLc0hWSqs9V5Dm2Yv6TOfLPSTUllXlf1AjJTRsAoGomI3esl1YaGgpVWSFLU1dEK0mAGmyp+vMZljPWosxrEKk0gqApMGaMC4XAuuJKSikEoVDXK4Vux2Wh3NzcuC7AtceZZRk63S5C7diFb/v2T+Kll17CZrv12TU0Tkq5rB03X+BZzjiOUVk47ZapamZD1zZZaiP2q71GYd0ZOU1R1H1UdtsdyqKAE6Aq2Pp+TP1sTa0HAQKogCHYoF5PCoPhCCenZ1jO57C29OGW9wIczRpVmM/nyLIUQaARhAqoKijsi+nvAi1tsOn2uH2tigxFSmMrDdghhoKGdF8f0WSKcR9o3688pBFsgyIPHK3dqzVDBlQCIwlu2uPhXnvbYLdDDPJojyefB++tzU7J59DWiMn7agM7ycS09wz5tzZQkECuHQKSv2//7kVMmjw+MNjo9wf4oR/6IVTGQHkqKURRVChLg3/xL/41qsoijhP8wi/8Aowx+Mf/+B/j3Xff9RvxW2+9he/5nu/B0dERvv3bvx3/n1/+5+j3e3jrrbf8ps4B2m5TjEYu+0DBGdfXX3sNX//aN3zJcl/LwNq6q2FS1/oIMRqNYW3jwXPjlx6d90I8YHFZGjSsUrjIhSAbAAFNaWFJ4cuHYozBer3zD4xghp1MaWzJTsgxoGGVhleCGTmBCDqSJPEhGAIVljGfz+cexAyHQ8znc2+gGEZJ03TPgAENCJJUJw2+rPtBg0oNynA4xG6380wAtRKS6udi5/tJvUrRJwGEZFP4Phnm4XWWZenH1hhX+My1DW8qzHLuyOqSDBFJEOXYqSa0xdcy5Mf5IhvB8Zn6fhxy0VLMV+/PNGx8jlprz2ixU21VlqhMU1mV4a9+v+/HXNYDYZ8bslHr9Rp5niGOXdaJUgqrurmdFNVyzidJgijpQuumIm7bg9vznOr+LwTBZJwI+AhWOC95Dj5TPks+e/k8qF8iK8XxZYYOgfBisUCeOVCe1sA+ikLX/C8MkRcZfuRHfsRv8Hxebepa7gkuBJYgKwrYWjxrDTUVdbqqxZ6x8g5MGKLw4aoMZVGgKJu1DNUyQGjEjqquVhqEIZTWSJIOsnSLJApwdHyCt77+p3taxLZxPHS4sFyA7XaHPMuRdGIE0N5rpxbtRadphxfcGqYDtK/dkHsV55a8XgkEJMCQ+xqwnyXRnqdtgyuFqPLch4COfOZt1kOGY+T9uue0nw4s7QjvTbKp7XCDBD+8HjlO/DzOKc5XyUBKUCPf1/6Zh9Rbta+zDVT4813syfsFGPL44C3mFVBVruodrKvlXxauM+h/+k+/jT/6wz8CrMVf/It/AQ8fPsS/+Tf/Bl//+td9saP1eo0333zTb5If//jH0e3+O1hrMZvN8Pbbb+P111/zHkK324FSNXW9uMGg10ev9iTjOMZms9l76FHkvNnBYABYhSBQKAqXWy4NvLXWd3PlZJNtp6uqwnQ63Wu0Jss3y14bNOaSqpJxQG64Yaj3JrcxrtokvU7ZWIphC34uAL+J0ZiS6ZCThLUneBBQaa0xHo990y9jjM82OTk58QCH9SNknD7Pcw9E2ufmubhIaVBYEl1rl+HCid7v928hboaIqBmgAWG4RbISbCDH8ZFpsP1+f8+7IH3KhUoQIfUeclEBTczYzXXlWahut+sLo5FZkc+B+hUyEGTIaKCtoNubDUOhMrbWOLj3BPXzNnXYg/OB4RM2vKuqyldslUCPup1ut+vZIbIDrgdPz6WmRw6AnZ2decaJ18s4dqfTQRDGWNYVZdughGPFewyCAMhzdOIEQRjCmArdxIGkQGlMRmPPYDiEBeRpE4Iaj8ZQgJ/z6/UaURwijCKURYGk0/Hv6/a6KMsKgdbI8gzp1tUMYUGuMNBOdKs12Fm4KAqcn5/j5ZdfxnK58jqnsnBi2ts0u/JgIwxDlMaFTmT8XGtVg8WmyFcU9V0dlfrQStfMHovFuX2iMiWg3N+NcojT1mLHuuQolA6gdAAdBkjruVjWovLRaITl7DmCsPEuuY4413idnM9ubYfI8wyb7QbdXseFaRRQFhWshb93ua/ItdpkiLSzF24bXGn4CSTbYkOCgrZRl6EN6dG3jawMuwBNijbXc3t9y/9zH2j/neeSnysPCcbaBrsNGni0QdWLwhFthkIe0lltgwHp3EoWCIB3sOiUynoZhxgcybrcdS3y9e91fGCwAThPLYodnT0ej3F6doanTy/wD/7B/4put4fxaIgvfOG/w3qzwW/+5m9iuVzi7OwM1loMh0N84xvfwNtvv42j42Pcv3/fecDbDVarFZ4+fRcPHtzHeDzyVfwoygp0gDzL8Z3f8R34N9P/Ly4vL2sa1Hm8k8kEi4VrK77dbNDvD30tiJOTE1xeXvrQDJuIkSblBkfWgtQ5y3uPRiOfcsmQDD02a633cJmVQEMEoAY2iUf/0mtjuGI8Hnuvn/E8ghuek8ac7Acbs7E8NhcdDTENxs3NjTeA7KXBcIRSTVYGz8/rZu+QJHFg5+joCJvNxtfgkMCCIkZOTJaQJijj75jZoZTyvW047tIjJBDs9/v+vLJLKQEYvXgKNAmMZAiDrAQZhvZCpXFh+IZePcEPgROFr+fn53sCPwIuGmOgyaO31vo24fJwnxmgsDlcLmx9nqqCFfFnAonRaIQ8dx1bGSphqIzjwK6xcRzj6dOnntlYrVYYjUb1+O8wHA6RZa45HzeUsiw9E7XZbDAajdzchjNuHDcZsiHQp56k/TyrqkKgnaaEm/Ahpo3PmWCO4UyyZwx1sSrucDhEunPzaDabeZExwWhVV
YiCAGXNoOg6fLLdbfFjP/4TqCqXYbSri3xprRGp8BaroZTya5Brlc8DbLAF5cgp4cW6EFJ9DjSUdVm6lvEs8mWtrbFTS9GvXChHaSeKh3b6jspaaKthKotOt4f+cITN4to7MdJw3GVAVE3D7HY7bLdbhOFZXfL1RYEAAQAASURBVK+DRouGdN/LbZ+//aU1s172wYBcI9wvpIE8RMNLwaRcL7yWfUbFHLyeNhji+EvQI6+Pn+EZqfoayKy599/OyGmzJnK8OOfb98PXcz21r6P9zPh58hm0r5ev4We3GZooinzvLO6jbXDUfp90CtuMRvs5vJ/jgwtEjYUO4JXFSilkaYZ/9a/+Nd5+8g5mswU+//mfweOXHuN3fvu38a1vfQuDwcBv4qPRCBcXF/jN3/xNfOd3fifOz89xdHSE2ewG222B+XzhN3c3SUvvYRZlDm0tHjx8iIcPH+Kb3/wmAFcwiN1bo8hllvT6rprpcDQAYHF5eem7hRJASGM+GAzw/PlzjMdjLJdLTCYT7x1yE2YHTq31Xut2bkar1cqnhMpS31xoRP4SOVIHwAdIJoGIm2r6breLxWLha3TEcbynLaF2gZNYKeeVseGaFEwlSYztNvWvpZaDISRjDIbDIW5ubnzsv9Pp4PLy0je6Y+t4evrL5RLdbtd3sCVqJr3NDYefRbaA+hKCPE5eCkAJTnidFD9yglNo2qb6mP4qKVe5+HjvNE4ERayKSsPJkBqN4dnZGebzuc9M4bgzpOM0RDsPEK21iMIQFhZVy5sLauraGgPLhYt9z47ludfrtZ8HNBRHR0cwxnjWgyGGtG7qVlWuVk2SJJjNZjULFyNNGzZtMBh4Y0724uzsDJ1Ox2lSdIhJONkzpgTVZKkkWAPg2Rg+f85jmVmltd4T7zIj5OjoCFmWYb1ee0aHuhTOuWfPnqHT6eD58+cIggDrtatWTOMTBgGMYLdcsSng+PgY3/aJb8N6vfYp2zQGEmg0m7lqSqQrhkplXQtVC1Lhvqv9JlvueVo/rkWRI6v3QdoIaTwIMhy4CF12TRBAqwAuzdOiKEuYytUQGgyGuApcpgwB2nuFUty+Aux2u7oJYQgd1LU8/Jg12ovb7EWjzZKGyLM84n2cwxQnM9vnkNGks8M9WepelFKI44ZxlqDlrnuU194O3cjPJXsqRenAfiG7hjlvjGpVGbhU19tMi7yG9vq4KxwhGQ95LsmEynCNHB/aCskAyfsh+85nITNh2p/RBk4S0PH/hxiv93N8KGbDTQYKId1i+o3f+E3M5wt89KMfw+c//wUUeY5/+A//IbbbLfr9vr+B6+trjEYjfPnLX8Y777yDBw8e4KWXHuPdp+/A2gS/+7u/iz//53/YswvcvLvdLtZXS3SiGGnm4q6/8zu/49EnN+U0zXF8fIyqrDAeh0gzB1oIGCgaHAwGPpMgDF0r9cePH+PZs2c4OzvDcrlEv9/36aK8DlLsUmzKUAjDROwKyodN4SLgJp/UmBBpEkTQ+wPg62EopZq4fbVfmIrXL8Mb/Bt1FwQCaZri6OjIxbVzN06LhQN3i8XCeYxpivF4jCdPnuDk5KROLx6gKHJ/f/1+H4vFwrMPQRD42iBSkMl7pabAWuvDNVEUeWMnQYDsXUMDys6ssomcVIwz9ENQS4Ai+2MwvMTNhSm/1BG1w1osCV4UBXq9HlarFe7du7dXiI3PiM9YhpQoUmU5+zCOGpARBHUNKg2tNKwSm01NM4d1iEIyXwA8eIiiyFdP5cZIA02QyfLs8/nc1ya5urpCr9dFnmf+Xqg1YRiKTNJut0NlXE0U2eyOm48Mz1VVhfVqhX6v5zlmawyCemMq8xxFlmFXhzk4T/OiQBxFnjF7fnHhAdrs+trXfRlPJrBVhfls5iqDzudIkgSL2cwZg/peXN0KuCwYsSFbOCel2+t6z46MnmPKRHPDWovhurS6rLTMG0r3Cu85Az4MYa0TdmpN4ShQlRXSdFczOSmyOtyqFMEKdR/C2AQBlAqhghAIXJc2g1psbkqgqhBHISZH03qONvsSsF9wrX0EQZPG6zrvGgQ6FCEQBzSs3fesgX0A0WY4WEWBHWkJMBp2ugl1c9/kepCZIpJxkKns1jbGm3OcLG47DNEGR9Jrb4dRpJPCdcxDMg/yvO61t1Ny5fi32aDmffbOvx1iCmgTGof1dtiozRDJ5yL/Np/P91gl+XP7qw0k29f6YY4PXq48aAyFMQZJ3MUv/aN/hOfPn2O32+Hn/+rP4/zsFP/kn/wjfO1rX/OeDWPtpIHfffddXF9f4+HDh/jhH/5h/NZv/RaUUvj617+Oi4vneP311+oc/cZzsxbodnu4vLjAyy+/jNPTU1xdXSHLUg86qqrC1dUVkqQD1MVcsiz1GR1BEGAwGOxlNAyHQ89WUDCZJIlnS2i0+Rl8kDQG3JB2O0dRS8MjwyFVtV+fQPZDafdN4YQqy9KzSDQqi8UC3doApGnqdSXttE0iWd+i3BgfSuJ9jsdjvPPOOx4Q9Puuyd2jR49wc3NTMxk7L44cjUZ7YIdoneCAC4OMi9bas1QcMzIJNGy8Xx4EEVL8y78zdESQQO8/TVPf30Wq1fkaAgTWxJCbFp8pQQKNEQEL2SFumgRRDAkw60Mp5YEP5zzPU+7F+TWACra+1kpsVJUxLOPoN1oAPtuFLBbHm8wZN1Le72Aw8OCDQNbpdUosFjkA9wxns5nf1Flpl1qeoiiQ5RV6tdHg3CJw49xlVg5Uk/ZLUSdDUnme73U1dsLNCOPx2LOOrMnBlO/JZIKrq6s9gEzGq9vt+gyqdoxcMhzuGbv96uzsDCzkJ41ImxWDZQZA5PVdzCJzrz2QoUCBr20atgFAWTU1DAjEwzCERQUNDWMB7ZT2zggq5Tr0BhGgAljlqqIyzKYVPJg5PjnGYDDA/Gbr58N7GQUa2jDUWC6W2Gy2CIK+H0dnNBWcSHQ/e4RHOwTh9irHBHHP4f2SMW3XwJHAm0JfqS/gPtIwk00Xac49snoSLBwCGvLepfG2Yo1Jw821K/dqrTWUacbAVAaV2u8QK5kGOi+SDZKgh9chx1SGQdqMySEwI7VDMpzTfj+BdVuDIT/zrpCNPDhefI28nvdzfChmg4YnjhNcXl7jd3/391CVJT71qe/GZ37g+/D84jn+43/4D+gmCbq9nvOaen3czG5w7949L+785je/iU984hN4+OgBwihAp5Pg+fNLPHnyBJ/4xMdRVa7UN8V4URRiu90gimP0+308fukxrm6uUZQ54sh5e3lBAZND2cY2FSUZ52Y2wmq1wnQ69T0xlsulZyXo9XERk6GhUWdohw+XOgmGV7gwgiCoaXnXwhpojKHfFOvFRsNBI0nNgSsx3vHGhEJEbvwECjI9itQkN35JDwZBiDTNMJlM/KbOsbi4uEC/33feZB1SGo+HPuRAQwbAMzKStSGI4qbD65BeP98jRUrcBOWCo0EHmt4BTKdlkz0CNo41tQAyJsnf8zrkJkIDRi+J98gFRMEk9RC8j149r2lIubkQHPL9eypyNF6Hi9MbuPQ5A4cx
KicUhEUUJHsbp2yO1+/3PTgAmh46FKdS3zAYDHyYqQHLBlEU1ALMVf0sNNbrjWsFUFS4vLxEFDmNUV4aaAFqOCZk03xRrfr8nfo6HehxgsPVauXXkxujwj/zq6tLf/3u/LZ2BDo16I9dw7LaAFCPw/PxOba9Oql5UcoVB3v06FGtNVkhzTJEYQQLoNPpoaoMAoojrXFl2pMYo8kIKtBIswxFWbqiYVZ7zQZ8Xdb68zX7pIiUQ1OgKDMUReZDHtR9uCKYFqr+zEApx2joAFCBg6SVhQoa9i9QrijbYDjCcDzCenGDylS3DIgjXCQ74eZaVZWI4w42200dVm70Sa7VAr3ahp6/y+vmunfzXfk92xjrhblV1ejJlHKZVw5kuL3IvbdhU5RyVZkJDG0tmiUw4T5IQCDDC3IMDrEG8v8S8LSFkG0xdPveK8FmyNDDoXNyjNrgQh4EJJKVkq+R1yELnvGaZYiGDgdfz2ertfbrlefg58mxu4vN4f/lfha0wi4vOj5wBVFjDDbbFEEQoyot/ukv/zM8e/cZOkmCL3z+Z9DrdPC//cZv4E++/FUMkj6ydYpe0kO62aGX9FBkJRaLFSoLrLc75GWJ0aiPj33sNVxdPcfx8RH+8A+/jDyvoFUIazXyvClJHXc7iLsJClvisz/8g5iv5+gOuihtCRUAlSmdyhsG680KZVn6ipxPnz7dQ4X9fh9XV1deR8AaCqTQ6X0B8KEgPgxqNSQVLz0BY4zvQEvPj8aRhrURHu0bXL6f1RW5sW82Gy+E42KQBZlYXIwe8Ha73WNmWGFys1mj201wc3OFyWRUh0g6WCxm0BpYrRYYDvsoigxhqL1B1lpjNBphPB77jZ8giN6zFHByc6BXzPGhgJBjEoYhJpMJptOp130Mh0MMBgN/Ho4zF0Icx57d4bhxwdAgUTColMJwONyjBBk+IEtC4SHnAF/LsSYwSdMUk8nEa36ur6/9fGJoRqYDK6UwmUz8omyeewWlLMoqh7UlokjDmAJVVSCJI0RR6DcWMgEEvnmeewChVFNpVGomxuOxp7B577vdDkmcANalqo9GI5RVgfVmiSAEXM2GCr1e35Xp3xUY9PpIt1uk2y1CrZHtdoiCAKYsMbu+RlWWKPMc2W6HbpJgs1lhs1mhKDLkeYrNZo0s20EpiygKEIYaxlTo97tg63ZrK1RVgarKkecp4jjEer1Et+vEnZvNGpPJyDOQki3kHJKbvdYaOtCobAUVaORlgf5oiNHkCMvl2vV8qVkD6AhKhYjiPvIKsEpDhQEQKnRHPQRxgNV2g7wqURmLvDIwUKigUVZAWVkYC0AHCIIICgqmMs6iAijzHFWxQ56vAFUiCF3vk8oCQRgDOoAKQkRJB3G3i6TTQxJ3EAUBQq0QKYU4UFCmRBi4rr1WKagwgoq7ePzS67BKo6wsyspA6RCVqaEPoxvauu8KMKZEECiUldtzZrM5rFUoS8nY1HXXcTtbggwaHSTuge5vrhx4GEbodHro9Qb1Vx9J0kUcdxDHXXS7PSRJF1GUwLV0CAAEYOlxY4CyNMjzEmmaYbdz7SKo71oul1itVj5E2WYQeJ384hrq9/vo9/t7adeSZeE65jlliFKGV+Q48DPaISGydHL8JEiQpcDbRl5eu5zTHHfJZMiaIG2GT4ZJlFIoypKZ9g4sWet/dvNF7f2//bOxTuxelCVQ25kgDG+lb991fPBy5XWrwd0uxbNnz/HlL/8xsizDSy+9hM/8wKfx1je+gV//1V/DZrWCsoCyFkWa+Q1/uVx4fcVXv/pVFEWOJIkduxG6pmeXzy+x3exQVQZ5ltf9KFwd+yzLkBU5LCy6/S7Ozs9daCcOsdk40VxZ0HMCttuNBxUvvfSSN1rz+dxnoThVdugf6PPnzz3lT0+5LVTk76gB6ff7nungZKAAzjEPlaeKabzC0HXcpIEDGg2A1tor7WlsSNvTWNKwUsQo2Q+mY/Z6Pd9XJQgCzGYznJ6eYrVa4fj4uB6HEEWR+5LXLGfOTB5eLxf0YrHw3WUJaGazGQB4gEZBI3UyXEzb7RabzWYPkGy3W8xmM09Vr9drr6fJsmxvAfHggiVTxeciN0C2Zud1l2XpdQ4cHwqEWZODm4oMs1AkyIqk19fXfnworNwPl1WeeTLGFd5SuilVbK1FoJvS7NCAsRWqGmykWYo8L/y5mKHEzZCfw9RWZntILRABLgWvcRy7kFiWoZPECLTGcrHAeDRAtxMDcJ1P4yjEfHaDJE4wHA6xXq98zY6rqys/vygazuvQgls3FzVd68SQbgNWSJIYWZbCxbkLxHFUV/V1zay0VnWXZ3fPq9XSr82iyBHHEebzmQfekgU77IFxnN2aSrPUi6TzoiAHAYu6iidccS2lgrqvikIYRUg6LoSyy3bYZSmMtdBBLdxUyoEVz25wK6W6H37tZFmKIs9h6o64URijk3QRRjGSuINO0kXSEca3vi7YWtIBCw2XKq1rg2ABWAMMRyOMRmMEQQhXZbTtaXJ8UJ9sn5nwqdkWezVLms6pt7NO6LnLr8Zrtp6FIPPj2k3I17IEPPu5yJoSBCtsgxB6VlV+SaZNho0lMJIGWTp5QMMOEFzIsJ88T1vX4O1ga2zan88v7p2y7L90SttjLMPQ8jPvGnfuVfLa5Xjy2QD7nc3lHJBriIBJOtayvo5cc4f0Ki86PlRvlDiO0e/18KUvfQnvvvsugiDAF77wBZwcn+B//73/HX/wB3+Ak5NTZHmOft2yfDgcYrNeI4md/iDbuV4Pi8USYRjgwf0HMMYZr6vrK9zc3LhQTZLg5OTEhyW01q76oFL42Ec/hs9//vN49uxZvYYsbm5uMBwO8c4779TFjNzAjEYjAPCG7ejoCO+++y4mk4lHvjSEx8fHuLm58fF7TtbVauUNt7XW/8wwD0WBw+EQnU4HT548wXQ6bURmdVaGLJddFIUXQFJ/ICk+TkxjjDcyUihKcZ9MRZWFyWiseP7BYIDZbIbpdOrHgedlKjDBA7MUuJCCIMBFLeCjZkc2gCvqeivUCjCktF6v/SSVQkymEvP/bY2MROscK96jjAHLsBQXB88jFx+fBTN86MHw2fg5LhY9r+Py8tIbXDJf1tq99FqGcrjAb8WDxYIOw4bdgQQh9XuMMX6+MKOHGxcZs6OjI5/KzXASNwXOHYJUjnu3kyBNt8jSDXq9BOvVArAGnTgGbIl0u0E3ibBazpDuNojr8B4FxJJFIbsVBEGtVVEwpmFYCHhc5+Ch152wgitTEbfbLYqiwGAw8OEqyfrRK+T8kOE2Pi8ebq6gpvLrzdEYvPzyyw54GhcmcOuqNohKgS3l3cEU3RhZliPdpSiLqu6VEgJqv84Eu626EIquPcaa2SgKbDZblKVBEESIwgRhGCMIIgQ6QhQlwst3Og2WMt8DUEohEPdojEFlSoRh4J2VprAWAdDt3VvG5znHZJpx23Nuwh/N/w8ZVYL9NnXP37eNpTw3sM9EECBwDnNdt40d39cGRHLcZAiEpQUk48D7l2JPaUzb4TkxlLeALq+zrdmT4ILObq/XQ6f
T2Utdl/co9RXyXtphHa4Rgg3uIfJZSs3HoefarJt9nQifoTw/zyP/3mZ97jo+MNiAQr3pXuM//sf/iDRN8corr+Jzn/vz+MpX3sQ/+2f/DP3BAGmWotd3aZGbdOc3fmMMyrzA8dERvvqVN/Enb34VWZbj4aOHmE6njhZervCf//N/djqGmgZfr9dYrZxordOtG3+Fgd9E2HdlMpkgzVI8fvy4FoWdeoHfN77xDWitMZ1OsVgsMBqNvMCIOfxKKcxmMwwGA1xcXABwC4cZMW+//fYexX51deUXK9MgLy8vYYzxtQr4sKih4CbLhys9bZmCSn0C64JwATI0QO+ZLIExxrMR7ImhlPJgiA3WqDPodDpYLBbeODGVkwWqaBykyG8ymWC5XHpjzmuR3gXDK8Ph0Ic6eE62hZceqqzWGYausytTHtvpcjKFV7IQZELo3VM3wcVHnQPZH6DJP5eCTup3uFCrqvK6lt1uh+l0ivl87p9nWzDF2Cg3Sm6sWZbV3m39uqpsKotq7YxgvdAdcGw0Q7JnjLUWk8nEC3wJ8Ph3glU+GzYZTNPUeWPWoJNoJJFCul0hDgNEGjBFgSgIoGERaoPpuAdbpSgLdy29Xhez2Q263U69WTtGgoxFksRebyTDRy6baejnMFlAao+MMRiPx0iSBKvVCr1eb6+gHceYv6M4UGu9R0O3N04nRHUhtm6vj5OTUxSFC4UActOlh+30FjQOYRgB1oWeyor9UlzTLVjUgMABDa0VtKb3qWujb/yzTHcZAI0k6SJJugiDCEoF0CpEoMMaeIRQCBygEcb4ljG1BrYqUZU5qiKHVm4/oy7tRRu/av3M/ULuzZz7zD6ikZSG6C6wwb1SAmc+q6B1TxKAtAEO17msmtx2PKRRvsU6CLZBZsVQ48Fn3H6ffK8EsvJaebC3kXy/vDa5t0s9F9C0d5D7WDsU1D63/AyeS1ZuBm6zH3ugpAVC5JjzmVLfl4t96hBrJJli6QC+1/EhmA13cb/927+Nt99+G91uFz/90z+NIAzxn//z72CxWGB6fITKGqggwGa39amkqL00rRTWqzUmozGur69RVRUePXyEs7NTv6F++ctfRpLEKIoc8/nCe/HT6dSrnPM8xyc+8QmcnZ3h7OwMl5dXtTdvvPJ9NpsjjmNcX1/j/PzcU/VlWXqPaz6f+5oFBDyMv3MyUFQqi3elaYqHDx/6wWc6JFNK6cUROEijxoXIzAcaCTZRYzySgEHmSbNgFr/zvGRZOp0OJpOJ94qZeUOGiSCGnuTZ2ZlvXEZtCOl6TmJ69DTGpLwJdLjB73Y7z2oopby2gXn9y+XSlwynMSZ1y/ADw1qk7zmxyfwwq4gbAVOk+WxI83PhZlmG5XKJ6+trT/8TFFGYS3aLXwBqEXSMs7Mz37WW7+dC5ULj4pebIK+P5297eO3YrPOwmw2H9UMIJGUlV1lintVxyZJZa/3YzedzAHVn46oCYJCnG2xWKzy6fx9/7ge+H7/w838V/5f/8/+Ev/d3fxGPHpzDlAXKfIc4UOh0Is88nZ6e1uugYc3IsrnaK30PtgjSWIeF48AKqIOa8aT4ltliXGeyJLybC929DZYg95bHyc3VVHVIx+DBg4cYj6f17qWAOmTiGQnQO23E1QTzReGyRrQK6+0ygNaha0Ev2AelauWjsqiqplldXrgwk4L2wCIMEyRxF3GcIIoShEGEIIgRhlH91XQ91boWjcLC2grWlA5sFDmqPAdgMJlMfJZV423fsX/zD7Wx5T5K1jGKmo7LkoKXhvEQk8C1QHAgNR2HDhp8CTa45uQXX3eXF94OA0hwJg06PXDJMvA675pDEji0X2PsbaAl38fz055JEMhsHQkW2qGVNqPSZqQ41u3xaYMYPlc5nySAJLBpsxfy+XBM72I+/tsxGwDeeust/PI//WU8ePAADx48wA/+uc/i3Xfexa/+6q/h5OQEV1dXCKMIq/WqZjlcxsR0MsFisYBWLva4XCxwc3WFKAwxmU5wcnKC1WqFbq9bVxN9BihX2MstgNhnZjj07OKMn/nMZ+pCS67L5XA42PMKWZGTGzQzS8gonJ+fYzZzMeGLi4u9DqX8/sYbb+DJkyde7xGGIUajEb7xjW9gOBzi+voaZ2dnCIIA8/kco9HIU90sqET9hsweaSo7pt5I0kCzvLas0MhNm9cmsxBYMIlgiaGNfr/v60dwXAiYWCWUIRGtXbElVu5k7QqZSkmmgIaemgDqIViPZLVaIY7dM+NkZ70Hxt/Tuoojr4meEFO15OKj8SEoYeioTfEBTTn56XSKwWAAY1x5doJVrbWn72ncKDpjrQ56Z0+fPvVjyNgv00TJCnEjk0DFg42yyZzgRgDhHXABa6VqhX7ggRDDTWR86IWShVoulzg+PvabFrvT0us4Pz/3LFSSJNhuN9Co8KN//gfxP/zif4//w8//Vfz4j/0oPv/TP4m/94v/R/zP//P/A1/82c9j2OugqjKsVkv0+726MNhNXXfGAUUnFHa9VkajEbbbjQfF3PyMMTg+Pm5aCKDRkfC7LE2vlPIhTLJsLkyz89QzN0cCVm7K8ggCF7rsJB289PLLbi3kJcmjGnZQM7BvSLR2qatFUcIVQXFp96ZSjn2oGY59I2BhrGvXnhdZHa4BsiwFP5EhXdLoTijZQVyHlvlzW+MQaHbQrWCNAxwwBUyZI0t3HijzWqQHfuggMJJGiw6RFFe3Dfxd8XlphCR4eBHTwvdI5qB9LvlMCIDa4VMCM2rXZImA9pyQnrkEH+3742tuszdWnMveMsztz6OBlkafn029VtvxaIOMNhN06Jz8fZvxkNrASIC+drinnX0iAUYbDL7o2b/X8YFTX6uqwu/+7u9huVyiLEr84i/+IkajMX7zN/49rq+vUaR1TnVtSHq9HrK6rPnV1RVO6o0nTVMMBwN89c03XfpVr4sf/dEfxb/+1/8W280W69UGX/rSl/CXfvonMRodQWuFMNKYz2cYDgd+Iw7DEN/7vd+LX/3Sr8JULrZ7dXlVi1GXODo6xmq1xnQ69bG6wWCA+XyO6XSKm5sbHB0dIQgCD0xms5kvcEUW4Fvf+pavvSHLOlNsee/ePVxcXPgYOotcVVVVtySf+b/RWye9JxcbPT1OZLIHnAD09GjYZEohKWxu3szekBVJSWtLUSWNGYEDjRNLnTO8w1AKJzbpbZYLJ5PC18u27JzIYRj6tEzWKLHWVd5cLpd7rAAP6YkQpEiqlouX90na3hjjP2s4HPqFyHGUVKVc+EyT5Tknk4kXCHNhMi2YYI0MBze8oig86Op1e0jzDEEYIq6BQF4DkH6/jzR3NQkqYxAYC6UV8rxAEBjPdFE4O51OYa3dq+DJAmvM/CEDxDAfU7bTdIdOJ8YnP/EqXnvlMZ588y1c1aJOZuI8fvwYP/OXfgp5luI3/v3/D2EJ3Nxco9vtYjDoYz67RqfWJi2Xcw8EnMC464ys8BaZFcXKupzrnU4Hy+XSF9eTxofF2wiuHOOWeP0UnQduvO05xt8nSQfWAA8fPEaWFYjC2JX8VhrW1lkjUF
AaCGAR6ABVWfnrdvoLhbKoYCoFrUMACqayUg9aH9ysLfI8q7vCApdXlzg7zjAadfdi6mTWZKE0t/FHqKoCZVXU/y+R5xaVKaErC1d1w8CaCmVRwJjSM6LX11d+bcIXUhcplKqpKGlMY2h2u9QzgtYyLKShlEtblWuwrcGQDIA03JJaP8RM8DvDY9LRkJkakl2RTAPPy79JfRTHuM1OSGPPo81K8FwMfXCfcHuGfF8Fg0ZsyuuRDgT3R59JKQARHQJpqGUYqx26kSwEz8l7lPfD+yXoYuKCfI1kIziGnJOHGB2+ToIQ6dy+X7DxgZmN5WqJL33pSwi0xssvv4zPfvazqKoSv/Irv+K97zCKcD2boT8aIghDnJ+fuxuuaekoihCHIco8x2qxxGI+R5ZnOD8/x6uvvuYFgk+ePIG18CGItN5gWUo6TV0WwNnZGV7/yOuezu8P+r4yJg3C8+fP91rWR1Hk9Qs0cldXVwDgQw7c5He7na/BATiRKQ02H/pyufQUN4si8TOc4e37B0XhH9kEFjrK8xw3Nze+qFEYhr7q283NjTeWnEhcjMxKoMdOwaCM23MyZ1nmi2wB8MCk0+lgNpt5gScrpvKaZdoWqXI52WazmRfISs9qsVh44EXGhZUqpYFnqXeem5kY8v+MMS8WCwDwYQQCEIZ9ZDGtIHDVTRmWoriS4IqLjilx9+7d84XD0jTFZrPxjeQ4tsymoh5CakI4J3h+AL4QlVaNQpypsNIzARw9y7Eh00TWhyEJY5wImfcpxaBJkngxpmRtGCIbDvuYHk+QVwXmyyXefucdfOvJ2/jmkyf44zffxK/++q/hX/yrf4Hze6f4yEdfQV5kiOIQFgar9QKD0QBlVaAocwxHA1SmRBAqDIZ9ZLmbhwS4SimvIZIbMQEE+xMB8JuhqQ0gQ4lcF04XkngBsAxXtQ8HemOkuxyPHj123i8ZPRU4VkM1IkoFp90gcHCbrwubuJLUqq6roV2NDcGGyDoWOtDIixzGNBv706fv4OL5M2TZDk5AWyLPnc6F6aWqrgkURSHiOEQYR4jiBHHSQRTFCMPAFShTCjAWqCrYqgJshaJweh7HlHRFKOH23s1wkaTWq6rCZrP27Ka15paxaVPph3QX8jXcawgYgH2gIMGGfGbSkLXPK8MD7bCDZCEOGctD7AAP+Zp2+EpeY9ugKjTXeOj8MjTRZj+4j3DuSv2ZBBqSTSDIkCnwfB7tuc+9kCystbZufti8RjoDDEETNEkQJJmP9jOWn/V+jg8MNvI8x5e//GWs1mv83M/9HIbDIf6X/+X/idlshrIsMRy5CppKO6HlT/7UT+LHf+InfL+Ry+eXKHKn8E6iBPPZDL/zO7+DQLvKnt1uxw/ukydPcH115Wl4WcXwm9/8JgaDAW5ubvDo0SO88bE3vJCTFK8r0OU81V6v59MzWfpbCtSWyyUePHjggY4c0LIscX5+jqdPn+7FqNmvhAxFGIY+3ECP9/79+163QSPM8IYMezBGfXZ2hvV6jdFohG6367NFhsOhBwU3NzfegLAqJgWg1lqcnJz41NMoirxne3R0hG6364Wx4/EY8/kcw+EQs9kMDx488CEgMjjUljCsQ+CnlPLgjGElAJhOp15XwpLoFxcXPmRCz7SqXFddCZq63S76/b5nfjgPpJfDzaC9CVRV5StKMjxG5uvtt9/2m950OsXR0RHG47F/ltSSAK6c/mq18mKy4XDoz2+Mwenp6V6vDopnAXgQQhAgBWzG1Hnt/qtegK0N0BpbAw5za8MkiAGwx4jRQyQrs9lsvPEh2HReVolOt4MocmM+W8zwx2++id//r3+A//Affgu/+3v/Be8+fYbrmxv86Tf+FKenJ3j11VdgTYXBwIGx9XoFwKLf6+Li4hniOKo1Si5EidrhYPozdRhksKTR4L0RMBVFgaDe/LhmqR2iN8dztMNmbW+XnVZff/0j6HS6MMal7RNkNBuuAw/GmJrVcM/RhfsSlKVLCyUgcTUh2HQMgKrpegVn/PMMQag9BAmCoO7KagA44WhRuiJfWbZDlu2QF2ldb6UCtEIQuk63bt25+R6FgatTogFVV/pQyrqQtNZey/RCT1Ptgw2yglKILQ2LpOyl0eTRZhnaGSN3hSjk8zp0SMMvQY78PMms8FpkiqYMA8mwSxtE7M0ZAZ4km9pmZoAmrCPBhtyfJPvL/xOE0xHg+eV9S0aFX+3U3Dabt/eIawAghalVVfmwnnydBHjyaH/OoXCaXH/vFbbz9/e+XiWOIAgxHo/w8ksv4bOf/SyePn2K3/iN3/AUFqzzzs7unWN6fITP/diP4ujk2HvKcRwh1AGOp0fIswxxGOFPv/51T7W+8sqr/gHNZjO8/c47GA6Hvl7Gdrv19HRVlZ5qfe01V978/v37mM/nXuhID58N2KIowtnZGTabjQccLNu9WCzQ7zvdh9baGxyWRma1UYZR1uu1r1VBMMMHw03ynXfe8WmmMsRA1mM8HnuajtfAollS8EejJvu6SIYDaDxoigJpgMiOMMbt+mP0vKYiTVMfUmLLcXqRFG+y1gONOVOE2fmXE/jm5sYLGNM09c3jyD5womqtfXMuhkck/UmgxPfwbxTXcjGQzTLGeITN8aX3T/aILIGs3zEajXBycoLj42O/OXCDstZ60EWgdXNz48EMi8WRWSKb1Y55djodxFGMUPxe60bTALmBW3rsTRtoLnI2IuMYcHOVmQicY2wGx7ReKejdpimW6zXefuddvPVn38Tl9Qw38yXWmx3++M2v4tnzSxRVhV2e4eHDh+jXAPr/T9ufBsuWXeeB2LfPfHKe7vzeq7kwFUsASTUJU+BMTWSzKYkSu+1Qu3+0HJJCCkd0hDtaEbZ+ObrdVjhshckOqcOtbtGi6JYMSRQlkWJzAGlAAEGCIFAooMY33zkzb86ZZ9rbP/b5du48dQtAMUKn4ta9797Mk+fss/dea33rW9/yPG/bGK4cF8dxMBqN9NpcLk36Y7lcot1uG6I1nwnv306r2JGR6+pSWK4x25kiIsjNje+1N0JuoEVRoNFooNfrw3U9M+c0quFAOwzcSAFyLoTY9hZJkwRZplUshXA0KlIhhQoL4dDaXgJZuo1an3nmLvb3B3BcIMszQGiiZ56nyPIUeZEiz1MURQ4FWRJBGf2XhhUCnuvAdzUiHPjll+sgLPcAW4Pn/Q35e0tCtZHOrT1ky2XgnLINHd9jG0MbUbCdAdtRsA26fX22I2LzEGgDAOx8zm3n5jlsI2lfx23OAg8bQahyNuzrIPrMww/e2zCuel7+rWrUee4qisN5S+eC922Pvf06+zlWx5LjYToV34L+2O/hNXH/4O/ta7PHuapL8u0cH9jZUKUH+aM/+qOI4xi/8Ru/AcdxDHEzSVO02m3c3Nzgp3/6L+Dg4AAf+tCHTBOv4+NjuJ6L9WYNCGC1XuHx48cmJ/493/M9EI5WRYQAvv7664acIxyBKNLQdavVwnq9Vcz8ju/4DlNq1+l0jVHIsm0/DIpqDYdDUylicz8IUdnIg+04cONn10kiE+wn0mw2j
SFn+SvL/Qjl20xkllHSMaJ3buejlVLGOHLzpQGdz+dwHMfoWHBy0DEgaROAQYXG47Hp88HKAD4bpjsYITEipRNDsTOiEqPRCHmeG1ItERobfiXyQh7EZDIxpbPj8dhUjVDSmtduT25uekwL8TOqmwuNFo0/PXtbL4TP2IaSWarL8zCP3O12jRN5dHSE9YZOrt6MmW7b29szTqL9fjoEm83GbE5c4LCc0oIRm7KJdtvUUZrqnCtTg3TUms0mPM8zKBb5HCQku66WNOezWSwWWK7WuLi6xnA8wvVohCTN4JdGajafYzqd4tHjR3j69BTr9Qb1srstRcFWqxUAaF5M6cT3ej1NDvY9Q8DtdDrGiafDToeHa85G3ThnlsulFhwr05927xyltjynKudmZ1NzHORFgZde/pBxmjmWMMgGsGN8nW1UaOfUizyHkrJ0MtjzhA6G7XiQzyDw8NEDLFc6PdRqteB5LrIiRZYnKGQGqTJASEil1Y6l0l8KudbnUDQCOsVCWW/P9xCGAeIwRBgE5ZzSBpmIpW0Q36O1oewW9Lvl2uT60KkjUZppTDrRHHPbcNnjXoX/eVSJhrZzYhswPZa7lRcMEGx05ba0SZXoeFtUbn+3U0TcT+wKEdvRsKUI9Enee528hyoqwH2G52Dww32Nr7Pvw97X7Os1T/Z97o/PkMiuQVHUrlZJ1SnhddyWDrOdkdvSZN8MpbKPDy5XriT2+gP8B9/9x3EzHOE3/5dfx/RGt9x2PRerzRqjmxFeefVj+IEf/H74oYdaPcJ/8V/+F/i//T/+Dv7if/zTODo5RCEKKEeh2W3j/PwCX/7yV7BabfDqq6/g+77vk+h0m3AE8Mab38D9+/d1VJ3rIDDPCxSFxGBvgNV6jVq9hnqrgVe+4xVcXp1DCYlGsw4/8OG4DkY3IzTbTVxcXiCuxRCugBf4WCdrjG7G6A36mMwmODo6xHK5QL1eA3OXWhzLQb3eQLJJURQKUVRDstHqlZQ7dxzHEEvPz8/R6/UwmUzgOLrqQ5f4amXBWq0G1/UQBBqmXS5XiONaiZDUSyfKMXltKeWOoZJS7nRfpQFiWoBpFU4O8jySJMFgMDDvYbqGhsteLFEUYTAYlOiD5lu0Wu3yWnwsFkv0+wO4rodGo4kkSTGbzQ1czs2JRF07TULvudVqodVq7ZTjAttKDi5+e5GwDJRRA9Nm9nt5/zS6dGRYeskFwpyqUgqr9QqFlEjSFLksIKEwnc2Q5jkOjo7w7oP78IMQq2QDp9yE1uu1id6Pj48xm82MwVqtVsYJCIIAstDGQ7PYJbYtqsuNiz02FAClyhJE3Y1T338Bz3OxXq/KlFlsDANTQldXV8jzXHfrTRKkpVS5nRLKMomLixGWizUCL8DeoINOw4cvEtSDAt1GiHy1ANIEq+kMi+kM7VYb0+kMq+Ua3U4XvhdgOpmhVm+gXqvj6vIa7VYbtVrdkMIXi4XhLhH9oaNmb6p21ZFOo8aAEMaBIbKm10tgnrWdTrM3QyklCqnL7u8+8wwK6GZocB0USuqeNJAlgVNy8DXVz3EQhBGiuAbX86GUQKEUpAAUJArkgCjgCsAXPkqcBK4LQBSAyrBeL/H6a69hRS5KliNJE6R5jkwWyGSBonQ2i/Jai0JBFgpFrqBkAaEKqDwFZKYly30XvuvAhRbuKoocKpeQuUSWZgAUPE9zk4zwoVLlfBKaZ6IcULOcqSMlddqOlV16HPX+miQp8nwbTVcrJqqGUA9fJSVoOTfvx5uoOgs2ilF1LJiGLOQuAqHUlpNgGz/7+m6L6k26w0JwsjzfSnSXc8pxtdCafeTFrsCVwrbpXpbnkNZ9uzYCIoQ5dzXlxNebfb48LwOH6j0JR/f82TmnUshL5yzLMiRpuuO8VJ1EOyViV/YZe3+LM8H7th2Pb+f4wNUonuvhB/7Ep/Dcs8/hS7/3+xgPR8iSREOtyyV+8Ed+GN/zvd+NVz/+ClrdJrIiRaNdxyuvfhS+7+PJ48eYLqfwY53TTVUGP4jw+PETTKdT7O3tYTDoo1aLMRqN8PDhGMPhEIeHh1rBcJGgUcozX15eIY5jTKZTdNodNNoNdHod3NxMUK/VIPMCwvEQ12u4Hg0RxiEW66VuXz0ZY7C3h/F4jMVqgUajjkePH6HZaOD6+gqDwR6mUx1dpUmK06dn6PV6GI3GODg4wHB4DQ3DN3agbACmSoBVLyTseV5ULuQEh4eHWC5X8Dy/rFzYwPdD85nLpW6MRkeAFSN8sKxaYeUDDSyjFBp9W+SJHjojQ1uZslarYTQaodVqmZb07H6b52t4nl9yMWomul0uV+Vk1I2X6nWtTtpqNXYi+2azaSa1XdFhE/3olJHbwU2T2iWMDpjft4XOAJh7IV+AED8XcpIkZsEz/cTFt1qtsFguAUcgVxJKCDieBzgCjudiOB7hmeeew2w6RRhFSDYbNOsNrMp26dyoSRglMZNpGKVU2UNDOxuAjiz1dLF1H0rgXCg4pQETokAY+chSKhyqEnXaStWzjJRVHESe5ouFUf6kwXcdIM0E5ssEe4M+inyFvU4DQh2iKHLIXCHLFIpCQWU5UChACbQaLbiOi9lUOwDdjk4p5jJH4AfYrPX4QgiTBmG3ZvZnYUqKYnFcLySTAppEWy+1b+jQ6VRpAsfZthQoqpu92nJ3RJ6j1e4hrteRK6UbyeU5PNeDEtpt0Pa+NHpKaJus2DwsQJZJbJIMRfmI9Hsy+ELAgQdPm36UYudQMoMjJM6ePsaDd95B+ySHD8B1PDieb5Asz9EohRCOdlaEgEPyKQRkoUtbXUfBEUCRZ0jXa6xWCywWMyzmcxRZXrZ+085HGGo0o9VqGRKxll8nUXSXp0KHQwgPUm6DN7sEWMuGu2a+cY2yEug2p4FGyjb2tgHV590qIt8WEVfTA+YcFi9ASom8KOBZRlbo/Jf+u4ZttL2yKqOqyAZQkiGhHYVC7iIlOYO8ErF18+19FEUB6Sn9HCwjzg7OTukE5GXgJEvnktdC5wnlvQlHC/tJpeCW6B+Rc4OUbDcN/b7K+EMIcz0531OOZW6hg/b9V9Mw1fSKnQLiM64iLf/e0ij1eh0//EM/BCEEPv3pTyPZbBDHWpnyU9//Kfyt/+r/gB/+4R9Bu93BYrkChFbjK6TCZpPi05/+53jn3XdxPRyi1xtASt3c5Q//8A8NjPPKK6/g9PTUEB7v37+PWq2miZZRBGHllkhyms1m+NSnPlVKcXcwX8zR6bR1dFevoxZFGPT76LRaSDYb9Hs9jK6v0W42EYchojDEwcEBfN9Ht9tFkmzQbjd1OiUMDNmuVqthOBwayDKuxUYfg5UcJFyyaoYVJsy1s6uqhshTQ2YlrKwj8Lph47OShkgH0QpG0Y1Gw4iV2YueES3Po5Ta0YfgxrS/v4/pdIper4flcmnEq2q1mqlwca1onpArc4f0iheLBeI42iFphmFo0BJgK1HNSctS29VqZTgJ9iLjBsfPsqW3ma4g6SqOYwwGAwMl2xUpTAMwdUb0hc+u1WoBalu2VhQF0hI5YMpKlQtLlIQ8
+z6klMZZIheI0TedIo4ToJEMk4+1ow1FIbINsizVZFG5JRG7riZSB4FvSpTJ0aCT5vs+prMZ8jw3GhzDoS6LrNXrSNICNzczrNYb1Gt1tLs9vPjSy/ie7/0kPvGd342jo2O4XgDd+VUZB4YVWpvNBtfX14aPQvibfA46FEzzME0WBIE5B5G3brdrULnNZoMszzEej81ci6KofGaeZQh3IWRC9HRmBQT2Dw4QxjGE48DzPU2OdLZGnZUg+oQa3XAc3elVrxWYZmXW/m424izPSr0gIE8ToCiwmM/x5S99CaOrK8hyvgdBgCisIYrqiKI6anETtbiBer2JRl3/HIUxfC8o0QdtM4VQWC0XGF5f4eLiDNfXV5hOJlivV8iyBAoFXNdBGAZwHCAIPLTbTfT7XSilxduEUOU1b6tMqvwCGj6mHrUjxNfsqmcS3eDPJCLexl2wjfb7cQXotNhkVP79/RCSanqlei/Vv1U5EVW+B8mTnEcMQhg02L1YOM95Tfa1ET1gyolOTnUM7H2N90TUwpZm2N/fR7/fN0EX78WzHJHbtEeqXJAqYsKx5fMzjo/1Vb1Hfnb1/LdxVb7Z8YGdjTAM8ZGPfhRvvfEGzk5P4fs+Op02fN/H2ekZlqs18ixDmqSQRYE806VuUAp/+OUv4/Of/zz6/QHunNzBaDhEvV5DmiSYTqd4/Pix0YE4ODgw/ILf+73fw3Q6NWWk3Fh6vd6WoS+AeqNhSIuNegOj0QjdTgez6QztVgvj0QhFXqDdakFJiX6vD1kU6HY6mJSiXlIWiOMIup39ynQVbbWbpbOgF3ieF6jVNRrAScKN//r62hgVGh9uskptFUT5MK+vr00+nFoW3IwBmL4t3Kx5/1wA/HybbMf3Up7bZkQbgzSdGvIqHQ9C4NRpsCczjSVTETTmXDR0MuwcMJuq2T08yLOhE0BUwNaosDc2Li46GdQAscm2NIaXl5c70YAt6c5xoINEoTOWmKpyrHntPC+dwqIoDCH2ejjcqTrheSmDr5QyJdJ09uwNWapdqWJrhaPI8h0ZaQiU60zLUs/nc0ynulyb12sTQ+kgMc1WFAU6nQ4AYDqbw/UCrDYJxtM5NpmEhAs3qCHNgdFkgdk6BdwAbhBjtU5wdXlpnq0Qmsy7v79vcum66ssxFT22jgpF5bipcZ4wJTIejwHAVGwRqcqyzHSujeP41ih4ZyNztt1ggyDA8dERfI+EVFaS2BvktvQVZbdTx3FMtVVuyWUDVn5eAMLVahcaidIIhJISDx/cx6OHD5BnmfFjHOHC80L4XoggiOD7+vv257Asb92qhwoBjEZDnJ2dYjS6xnqz1GiHK+D7mruhjYWLINA/h2GIVquFvb09sybtCFQ7sniPkSB/g9wp13V2DJXtXFS5D9sp+/7S3fbfqg5HlW9QRavsc1WdBhupqF5DlY/ANWobXtthUtgachp+2zGwCZHVz+M18/3kytlEWPtz+cX1xL2cDkgURSaQ3U1vvZcDYpfp2s6Z/Zm3VeIA2Pl3NYWi54Fr9iz73PZnfVCn44PLlTsakvniF7+I4XBoqjniOMY777yNz3/+CwCAZLNBukmwmM+RJiluxjf4p//kn0AoTeBbzBeIowiqkGg2mhiPx/jqV78K13Xx/PPP42Mf+5h5uPP5HO+88w4ajQaEcJClukqD2g40xP1+H9/zPd9TEii18VouFojDEJObCQI/QJYmuixuk0AWBRq1OsajMaIgxM14hCgKMZncmEmnpdE3GI9HqNViY0CSZIPFYm6iOeos0CA2m034vm+USekU0TFgOR8AdDodPHnyBK1WC5eXl2i1WoZQSYIfVUc5IezqDS4MRs8kxdndZBmd2psCq0q4mEjgIpmPBjcMAxNl8nO3+gfb5mHaodi2VrcnNT+DzhA9dApwCaEJjI1Gw2z4XLCMMIhC2FEDJzuraJrNpjGuhPDpNJFDw3HnOGRZhqurKxNN0wEi3E8eAgAMBgNcX1/voCd8H6stWEqo1JaQulqtkJRET3tTvX3Bqp1NzHHcUtU2M8+11+sZ3g0rO1hFxJQJZeqJUGVZBggHmyyHF8QYTeZ4891HeOPtB7j/+AwPnpzjZr6CHzXgxnWMpjO88eZbCEreBMnD1KYhSZoBQKfTgZRa8p+VOUyfcJ3QyaYjmFsRI+cbydl0fqn3Yhsw+8tm6pMIvre3h6JE32Qh4Tr8XKDqaKjyOx2VoiiwLrkkVYNBfQXHFaZbr+MKjMdDfOPrX8N6vYTjuwCbugkXjuvDcXw4wtOpIKHTKLwWYfqzaJ7SfDbDfD5Dkibb6pUSZXFcXcnkOKJ0PnyjSBpFEfr9Plqt5nsMVHWe7UDhZaCk5+SWE1N1NG7L+duIxG0cBD7X2xyNKlmUh23kuU7s391WmcLzSKmRQP5sIxL29djnrjogdqBj31/VyaKTwOuoogs24nHbmHFOMxXLg3pAJNJ/M4TI7BiV8bcdtKrmhj0WVYeNf3s/Qu/7zadv5/jAnA0o4K0338TnPvtZo06WJCl6rRYmsyl+5Vd+BYeH+1CQePGFF3UU4wcYLUYYXo+hlAAKYJMmODo8wuXlJdxAR6ivvfaaiSCff/55Y2CzLDMdSpNsg3WyNhGQEAKDwUDreLTbaJToxvX1NbqtNjarBK16QzeYKqNuVg5Mp1M0azHiQD+MRi3GcrM2uhuNRh3L5RphqCMPXfZax/X1tSa/zadm82TagRu83UadhprCVVEUYTQamY2UBMkkSUzzNt3wTF8nO9lSW4EPmKgLJygnAx0H/o7Kp7Yh4qZqoyGMVGlAiB6kaW6cCQpq0SFgTp1pDCGEcZYY3doQPB0LpktMFVMZnRNtYPrGvl9ObkYDvEallOlFQzie0uN2aaSd2rHRJToTtoKpvZGwWqjVapny6DzLzXVTgZP3QB0OQKey/CDAYrEyz0dXOWw3KZswZsOzvIYsSyHgmOhXIzVbOXaWcQshTHkznUJutvV6HRACs8USrV4HvueiBgXIFG/df4Lzi2vUajGiqAbheFgu15jM5shzGBE2tmnPsgyDwcAIdvF539zcoNlqGYfQHluWuRIZo7hbWvK97J46TFlyHemSb83ZqEK+HE8bym80G2g229gkGer1AGnGhnQ0phaiYW2WjBaZnpJSAk5lgy7RDFn2f9cKngUe3H8Ljx/dB2QBF4DrMCXiwnV8SOFCCAXAhVIOlORmD5M6y/MMq/US88UUjgNEUQBZaD0N5Uo4vFZZQKDUegg8uK6DeiOG77vodFvo9bs4vziD0rmjbSTq4D3Ggf/mvouy9FaP5TYKB3ajfXtMbMfANkDVNIidxuF858G1bqMF5hqtCNx+PQMWex4URYGSGrvjhDLtQHtip1ocIaCs4IVH1YmqDJz5nS2GZTvTVePOa7bv06RVy3FUShkJhSoaZBt7OxipOmJ0ZHjv/Ey7jN7+suUFbDIqz+eYubxFu+zn++0eH7z0VSmMhkM8fPTIMN2V0n1HGnX20WjiuWeeQ55liKMIK4oMRRHWyyWajQa6nS6uLi/Ng6/X63jjjTcwmUw
wmUzwXd/1XWYTWSwWGA6HSLPUsO89zzObXBiG6HQ6WK3XePnll7Fer40servTwjpdw498zJdzSEg0202s0zXiRoyr0RVWmxWuhleYzCaQskC9XoPveyWjXudA63WtsiYcAT/wsEnWiOPIVCCwdNWGsu06a/baoBdLAwYA5+fnxhHiJJvP50bfgboFrNzgxClK9nNS9p7hxGfUyL/RQHMjJS+ELdftEk1Wv7B3DFEBpgO2DojenJj+IbFTiK3jYyus2pEEADM+utpn60QQvbCJgEylCLGVyGUuk/AmSatMa1EXgwf5Llyw3EDsMrRNGaFzEZHnQh7PcDg0iM56vTbjT1SG76WzQU5JUJ7fEbt5U7Nwb4GhaYD0c9Gqr4uyvbtGULabGo0znwMdSaaIgBIml7o762K5wGw+R1yrQykHzWYXEh6uh1NcXt/g0ZMzPD2/gvBCuJ6PopBmztAJHI/HxqmZTCZgBRP749ABZ/kqx5IVU9SMcVzXaNDQAeS4ELG0Hbeq5gM3SN73arXCnTt34Lg0lLpstSi2xFyScmXJt9NohTCbLjv0KqtDrHC2G2shCyihdTM8z8FsNsH9+29jPptAFTnarW11lesHAFwtCibK7q7CjhZ1q/ii2JbEC6EVTJ1yLDzPh+/5cB1C5q55/q7rmFQp05P9fn8HxqcBsucX15yUukJHCGEge4BVCO/fJ4SVULaQlj237Qi/apyqRpyRND/XPp/nafVXKu7ysPkV/EzDIbHOxdcShbOdHHMNFjpml6YyhWxzHMz4YYsA2Ckgzk97PXP/4h6240hZY0JHyCad2+Nlj1sV8a3uKfZ3+3P43UZkmOq3hSdvqwrivdjIzW0oy/sdHxjZyPMMX/va19Co142mRJLm8KMQi9USgEC/10ez2cDV1SWmN3ojGgz20O10MR6OMLweaudgtUJcRj3C1fDzF77wBfz4j/84xuMxXnjhBbz55ptotVr43Oc+hx//8T8L13exXCx3UgVXV1fo9/tQUpk+J0EQIPV9bMqmUdejIcIgwGw5x8HBAfpxH4PBAPeeeQZ37tzBwcEB2q0W2t0urq+u8Lf+q7+FTq2G5XKBMAxwczNGu93BZDKF57lQSmKzSQ2BjRwTGi6iCIy+pZQmt319fW2ixNVqZVre9/t9nJ6e4uTkpBTa6mA+n5vonVwG29GgBgJFlDihKEwWBIFBcghtUyX18vISrusaKXZ7QdHx0Y5Ibowuyz2Z7mGJI/+d5xmkFEZbhNEqnzeNAzVAiI5xMXIB2E4S4Xs6H0xLcPJT6Izno4YD0ZOiKIxGCa91MBhgs9kYROry+srAyUwT8TqCQHcN7nQ6uL6+RrPZRKfTxmK2bTRHUuRms0G/3zeOCbAVW7OdKkdbOb0xqS3zW6/b7eI1RlUCYRhZzphrDDcdcEA7W7xPdrsNwxAo0R84AlGzAd91cX11jb1+H0IVWC4L7O0fY7XZIFcbDDp9rEtUohbHxiGgU8nzX19fG5G82WyGTrdrHB0SodmIzS59pRNJtM7eXO2KJZbG5nkB3ZtkF/rl5qjTTHrOHh8d64jN9aC1KnQjNaYqNGffMb1D7Pw2nY2iqHQ3tZu1ObqcESpDUWR49PBdnJ8+gecoJFmGeqN01AFskgQItvl6fQ5GqYweM/MFpUqyqICSSl+lklBSl8UCEnBQXvtWeIkIhOM42Nvb20k/6UMZtKVqiKC2rQx0uovz0YFSWx0GGlE7pcX1yrlaTTvZP9tRuR2B246D/T6bG+B62462NLg2r23nMy2UgoEZS+SZ4uS8sVGCakqhmur0nK259H0fgXu7kudt11RFdKopFduxsH9nn/82h4HjaKeCqugoHRn73NyH+Ho+06qDUb0/pvI5BrZT+62OD+xsrFZr/N4bv1dWL/RNrilNUywXS+zv76HTaePp06f4737257BarfBjP/ZjuHuyhCoUJjdTBEGExWyJZq2pNQ2kRLOpFQPfffddA7u++uqr+MY3vgGlFIbDIR48eIhPfNfHMboZIctS1Mo8MMWVHNfB4eEhPvzhD2M0HOL48AiDvT0EUYjnnnsOjXoDB4cHGAwGaNQbqNVr8IMAqtDNiKIoRJGmcADNx9iLDOQVhj6SZAPHESgKZSJyHjYfgROCBFP+7ebmZoc8REPL6HS9XuPk5ATD4RD9fg9nZ2dGQ4GvY3M4pikY3bPqA4Ah7XFD50ZsR/OMeqkeSo6IPeE4iZIkNeRUCnQxLx/H8U7fGY2qbIwwmF36aiMrRKQcxzEIGSNXRoX2QiWCxujZhhDpcJB0yvOTDEsROCklZrOZcUgYEeZ5jlocY17Ka+9ArCXxkSXBbGefJalJvbDKRSktFU+9lWVZGstIVjgOXJS8HqV0hFzdsJUsRR+sDU8BUgBZphFCPW6uNTdDU2WkeU3C9L9h2sxxHM0r8n2ssw1ySPR62skN/QCtZhfz2QzCddHv72O2XCCKY/iu7t7b7/eNHDzl7qnbMp9r7hJVaB3XRb/f31ER5Ry0SXb2HOOzp4NXhZDfb1NnpEVnfDAYoNFsaIdC2JH1tuyT6qHKIkw6DsxcSJIEUimNHtyyBypoSfnQc7CcrvDg/juYT28giwyOUDg4OIDwfRTQpa+u55WVMPrd+gy6t4mUGfI8gSxKDQ/XgRJa40NAQDplnwoFAGXELyWkzLUTonQlSrO57ezc7bbR7baxWMwg5bZiLwyDreKp2CUsMnWpiYtEZHV30yoCwfG2UQt737PntI082cgAnxedF9sRpwNgR898DYMPe41W55PtJHBvJreJKSKbDJpaRGCbP1S9L0dsy29tZ82+R/6O89weL94T31NFmuzvfI393f47f3ebk8TX2SkW+5rt1ArH2NZYqjpZ1dcRwW80Gjtil9/q+MDORpomGA6HhnylO7ZqMZ5CFtjb28dXv/pV/LNPfxqf+9zn0O/28A/+X/8DDg8OcHlxoSetUohKkllcr6Hb7UG4unPhcDjEkydPcHR0hMFgsBOJQqBUCO1gVkpwp2mKhtUdcjAY4G/+zb8J3/MRRSG8wEcQaRTE9zykJZy+XCwgIbFO1shKhynJBIRSOD07NToTrVYb69UaFNnSEzbVzo7FuZjP50bOnFE0UyDM/wqhRah6vZ6B4clToPNydXVlNvCDgwOcnp5ib2/PePFc3I1GwywcbuJcKK6rlSPp6NTrdcON4CSfz+fY399HmqbY29vDxcUFhNj2aLFLwPr9vmn7bUeiRElIGlwulxBl5QRTR6wuoRLn/v7+zrUAMBOYKQlWJQghjOHhd26MjOToPLF6Ic9zQ3AlqXOz2RgCL+XTaZDZ9nxROgt2sz46SERUHEeXal5dXaHb7sABsL+/j9FoZMSneM7pdGqcgSTZaKKgsnPW8j0bg/7D7npTSkFCQigB3auDOh6x4WWQL8N7pcNlp8aISi1WC3iBC8eLMLm5QbPeROgHGA5HGAz2kMsC5xcX6A56kFJiuVxi0Oubiin+jqJsLPWWsuw+W86RyWSCTmeLzNnwsh2J2wS4Is/hlQ67Hd1Vc8nVjZnqv0
IInJycaDKoEFrHoiRf0vnQmiZbLQn2OXFdvalmqYamoVht8t5DSllWpEhMpmNcXp4hzxL4nodms4nnX3geT30fGwBeGf1BCAihnQaFAkoVkEVednfNAEgIR8H1HKjch3L1tbpOec8KZbfXbcfXosghCx0kUO2WaV3uHXTgqtHn1pHfPgs9hlujpPU3dlMaNieB85PPyY6SbUfjNiifv+f5aQCZgq1yFVyxW3ppR+w7a8VCCOzvjuOYfcpOc9j3x+CIc8+u6pGyFHUrj0JK0/W16rzZTo+dlrCDp2raqbrmq/fzHjQKuw6F7XzwOujc2Twbm9tEdMgeT9vxs6+rmkrhWBLt/HaOD+xsKKUJLHfv3MPF+QU6nQ6E4+Hm5gYKCp///OfxO7/9GWxWK/h+gCzPEbgeHtx/oBtz9XooSi3+Xq+P9WaNPM+wmC3hui4ePHhgxKXu3LmDm5sbnJyc4PT0FFeXV3jlOz6G1XKFVqtl4ONFSbyUZT+Jw8NDpEmq66ddB7mSiOLSuMkC48kNanENSZ5BOA6Wm21uuRaEeP7558sN2kOaZmg0G7i6vMbx8UmJALilGFZmDNv+/r6JDFjWR+NH40uWPRGOPM+NcicfvJ2LJtGOTexIeGTag54oS4A5EcnipzPCNEWe5+h0OsY4Ui/k3XffNRG9DSkXRWE0PLihkyzIzZ15UCIYw6EmGpJ4SuNHjgjJjOTjUE6dKAMX12QygVJqh1zICJ4VC9ygWPY3Go0MykHHj5ApdQTogHDRkUyaZHrTXpdOK1NedKaCIECn08FwOMTx8TFuRmOEZSVAu93WOhzlYqYx3kZemvGvrDXEDcBe2LfBkcYRUdrRD4LQOBkcOxvZqJahcvyKQiuI1uoR1pslAs9FI4qxmC8hIuD46BiT6RSFkjjcP8DNbALXd9Hr9XBxcWF6BpHPo6u0Euzt7eHm5gZJkuDo6AhX19cQYivlTifUNiA2oc0myAkLnuccJFSr33PLuJQH0at79+5BKglXCChKjHOTh2PKHPVz2KIdHnk76aa8JlGKLr13D9Q8iRCTm0u8885bGI9HKPIcstDzJvC2CKcsG+vZqIbWvSiQ5SkKmQKOhAvdNVYVuq09UMAVLmDeCwiUBrgokDkZdH+VDJuN7iirmzZqrtmzzz6Dt99+q5wjoUFd3+t0YMfw2I7gZpPsIBrG8Fe4ErZDaD9jfqcRp+PD33EdRlGEWq22sx/YDo2UEirPdwxhNcK3nQIbXbE/ww5wGFBUo377c+3PKIoCudo6G3TK+Hl2mozXQWNdRUns67TROc7j6vjx+ux5b6N99r3SyWBamnvzbfdl70F8rtVzV1M+HE8Ahpv47aZRPjBBVH9YYHK1WaY3Dd/TYjpFnmO92iBJMtTrDTjCxWQyRRzV0Gy0MBqO4bgesqzAZrUxHjS9wCzLcHFxYYhOL774ojFyv/M7v4MojOA4W5lqGlLXddFqt7YwVpEjqsVQUFhvNliuVpBKwQ98CNdFAVnCmwKNZhOO66LeaAAQyPLcLHDH0RD+s889g9HoCqvVEuv1CoCuCEhSTYrcbNaQUrO5m82tgiYA4/0xeqchp8NktyLX6qRDgxpFUWTEkbgYSXQkKY7qohwnEpxsbgfRFSpKpmmKVqtliH0k7dkLZAv9wSAOzK/yvQDMvdDB4MSlsWGunjD8er02Y0GkhBEFr7ndbptOt5zcXEgknZILA+jyaD57OkgkGXqeZ3rU8L18DiYaKfOf3JSprEpEQwiB2WyGwWCg++6UiNL19bWRJa9Cp9s6eG1Iq3ly4Lb8+e0RKF8jpdYZyUoUiA4TN5jVamXu2b5/AGbN1GsxQt9HUeQIAv28zs/PIaVEEIRmLB3hYDQaG0dcSolWq2UcvSiKTCflVquFSYnmKAWzpkmCJgdAkzWL8lnojc73AzQaTYM+xXHNOHi1Wh1hGEG3fLfh5G0hSV7kpcBeDe1OF0JoZGOnaRp2o2v9szLn8lwPSgFZptVFIRwoqd9Zvrr8v9bVcF2B0fUV3nn3HaxWC42CKIXBYA/9wZ5Z+47nQqkCShZQhQSIaCmFosg0L4PQtlv2QPF8eF4Azw/h+RE8P4IfRPCDGEEYI4gihGGEIAxRb9SQZSmWywUcB4iiEFEUoNvtIIpCpGmCosjgOECRFztGeev8KBS57gPDQxuo3dSJbYhso0ZSJc9tO5I2AkF0yzZ81KWQUpryfBvRoGNiO6g2SdhOBdmISjXt835OimuRT6sojF2VVk1XCOyWtnJ/4rltB81GaWzui/067u/V9Mz7pVuA93I2bhP7slM4/M4glfs1bVW1RNb+4mu5/3M/mc/nhj/3rY4PLlfu+eh1+phOp7i+GqHV6uho3vUReAFWswUcAYSej8V0Bs/30Wy1IJXEfLlAo91CRja3koj8EH4UIEk2iIIQy9UKp0+fwnNc7PUH+OiHP4IH797HweEBri4u8fTpE9y9e4KrqyvU6zUsS60LxxHI8wy1hm4uFsYhkiyBUhK1OMB0OkO9FmG+WKDdaCIvcriOi02SQOYSDhxMlxPUghCz2QSFyjG+HqFVNpJaruY4unOEMAjR7XURxzFeeP5F9PcG8F0PJyd38Cu/8qv4tV/7NSTJGoDCzc0YrXod6ab0AIVjPF7C2p7nlaqnujU7+3msVmscHOxjvd4gjuu4uLgoI6paOal8pCm7foYGWTg/PzcEzpOTE0wmE+zv75s28BTEsicpKwooksXqF+bP6/U6hsMROp22MV6TyQR7e3s4PT1Fvz/AZrM2C6fRaJVVCT6KQsFxBLKssNAW3VBqtVrvRDgUkWHuWSllSJZkXdtS49ycbIcTgNmM6KzxYHkqHQkSaweDAeYPF5AQkJIRr4soqiHLZqWaa4Jms4HpdA4pAS/0kVtS6izd5Nh1u13TewYAsiKH5wkUUpQGrUAYaoSm0FWUKPIMUEVpU7cVE9pZ0UaxKLJysweKwgFKDgjRHzY+o7Q6ORthGBoUbT5fwveDslfKClJJdHp9rFZLw1sRhUKeFPBcF/OSbNzpdDAejxGFIRSA+WyGdruNLMuwLFOBm01aOoS6W6pSjJAE0jRDnmfGWQnDqORBuSUB1EW73TUOOCtU8jyDJitukYECDhQKCDjwgxqSTYrDkxMo4QKOD6kcCKHl0wWE7oKiyiixTKtIJSGUhCpcZLlElm901YrwIeDqFIxyIAoBxxWQeYYwihDFLjbLGzx65w1cnj7RDrnno8gyHN17BvV2G2JOp73QDgqkdnjKW1ASEPDhuX5JI6F0uUKOAo7rAYqN3/QbPFcgz1MUuULY8hFIDwe9IzjQ/KsoCBD4HrJkjbt3TnB4sI/R8Lo0MDmiyAeEA8/14HkOHEi4kHChtUKyZIM804hjIXUPJDt1SyeDCAgNDw2YkeYu0Re3NKBKSiM3biNbjqORpjTTPTxsDQ2FMlVRFCZlb3NzgG1vE9so2o4501ee7xvJ+qLQysBFUZiSc6WUJmxjy7+oEi2VUnDc3dicxt4OAmxeBp0QOyVtcx44FjZ5k9dtoyG2A8R9jkgRX
0etFfv9Njphoxh0LHaqa6yUVvV6bIfNPvg8/r1xNnS0ska71cH4ZgwFiTTTOc7VaoUXX3wRj588gZQFul3tiIQlKTIvcniei2lZ0VGr15HnGdKF3iipaHl2egYAJkccRZFmzu8NkCWJFvMqG1G1Wi2tk+97gAJmZcmo4+iFvVguAAlEQYDJzU3ZNGxjSHaqkLr1txCoRTU4Auh0O/gzf/bPoN/XZLP9/X006g10u110u1090J4HR7hGV0Mogf/5f/7/lNGSNrBRpOFuz3UhwggFYFIhzKe3221T7UDiHftcXF1p8aizMz0eSinEsWsEm+gw1Os1FIU05MEkSXBwcGAkyB8+fIiDgwNTujqbzXBwcGDquTebDU5OTnBxcYG9vT0sFgtD8CNyUq/XDEGS1TdXV1doNpu4vLxAv983Bm86ne4okrIygeWnvH8hBGq1GjqdjtFXsPVKeM/0tkkipcppp9MxaqdMI3ABUNyJC5r6E9wYDg8PMZ/PcXh4iHfeeQdHR0c4O780hOf5fI5Wq4WDg0PDw0hKUmhRFFjMF/Bcx5TCEmVhF1g6abPZDI7jIHADE5e7robK2UfBdR1NDFQSCTcZtUU67I2Uh96stwgcOxtzHDrdLlarlUkJCSFMemd//xBJkmC1WiMun89yvUKtXketRAuJyiyXC3OP4/HYPEeel4J+OhIGoiguUausfM7bDVmjSh58f1tS3Gg0TJQ3n81MZO84bumskKRIJr0sQQZpmlQVUnM9Bnv7cHwfUjgmZWJEF6xDKZ3aIALnOFo/oyhYAulBCBeO2CJqUBKOkAh8F0IWuDx7gkf338W6RG7YWwXCxSZNy0Zv5gP1N9jGS8Bxtn07jEFxANcv9EVL1yAyWhZ9Dc/3EdU8BKGHe3eOcDzoYn4zxnA0RLJJ0O11EPguLq+ucHx8hK997WtwHQdKKhS5hO9roqpGaMqzKwUBCVnkmqgKzWepciTsCL7KeaADYtIIsNCDknhe/aI9sfUfbH6BUkrLzVuRtx2p8/08qsbRft7cm5jS2a49dwcBsK+Fn8G/h862lJ6N1vg3Olx0hGyUwkY8qtd6mwGvpqJsYmeVRwFo5DyOtxwuOgv26+3z2Ofg87JREvugQ2k7PPa12Kmeb3V88K6vJfw2m09xcnKM9XqFosiwSdbwfBdPT58AkKjVYggoFHmG4fAaV1eXkEWOm/EIjUZNG8g8M0QnCjJNJhM8ePAA8/kceVHg45/4hNGlePfd+5jMZnBcB+vNGn4YoFBKR2h5jjTPkKQJsjxDlumeE57vY7lcwXUDAA6SRBuTPM+Rl7ks3pcoa9aVEPirf/2v4yd+8j/ED//Ij+DlD30IH/7oR9BoNSFcB67vYbGYoyjyMlKcYz6f4+tf/7oxdEIII4OdZRn8wDe8BebTKVpGtIN5Z0oHU7CGD5xiYDTWW2hOK5auVqsdY1CvawGyfr9vUBQu2LOzM3N93W4XFxcXJnL1PA8XFxfaqJbdZSne5ZaVBsOhLl8uigL37t0zTo7ve+b1RCW4KDlpbfKk47gGzbm+vjZpHxIHafx5rYx2+XlUx0ySBMA2n0hjyGiJn99utw2nYTqdGl2Gi8sL7O/vm+ZmbKZHvZP5fG6Mtu1QEBkiuZVRhF3uzGiEf9OQqf+ezYbwevWo5luruWTOB6YsmFKjMV8ul+Z5FEWBm5sbFEVhyq+LothRGd1KthcmFSOLwnA1+EyE0Mqtvu9rAjd2VT05NoR2wzBEHMempt8uXweAsBwzwuq2HHyes3x4Oy4CWnMiTXM0Gk3s7x3AdX3IQpVRq7x1g69Gwu5O1F1unoJRsv5SkPA8F3EcYrNZ4+HDhzi/ON/hWEW1Grq97nsIc+9nWOw0no0UeK5GPIxuhOfqFEkcwvddhFGI559/Bi++/CL6gz0M9vYRRXHJsVCo1evo9Xp46aWXzLz3XNdA/+aazH/6yMv9zEYF7GuupjTYip5kahofe0+lAbPfa/MVbAej+hoAZs3bqQHur/b5bd6EWUvWded5btK2nFfclzg/7T3KRjWYhrWfK1OyVfTARi9IpGSqiPfAisTqmFbHzSaHVqtaOIZ2VQ3vkX+vpn+q6ITtZNioUvUZVNM61eO23912fGBnQ5dSaoN1enqKdru9I5nrOA5cx0GjVsfl2TkW0xl67Q7iIEQjrqHdaELlBdaLJZLVGq7jot/rGQErRtbL5RJZmuLw4MAIQDE90On14foBJATSPMdsucRyvcF6k6Dd7SHNC2RSIpMSruuhUW9CFhJRGCNNUhS51HXsaktMDAIfhSyQ5RmSUpuDfyc5kxH0ZDIBsO3nUKvXcXp6anQsOE4A28m7cIRjNDHIuibBhsaQE4PaFOSkEIZj5QjJfgAMUrBYLJDnOa6urtBoNIzRtomK5HzU63UcHBzsGJZ+v28IoXYZKQ2s3WGUhowqj1y8GpnYmDJam7dBiHE4HJrz0NgAMEqTRDw6nY7hqlBEi9fLcVyv1zsNyMgp4WZELgXVNfM8x9nZGRqNBsbjMQ4PD82Cs5+xEKJsdraN6Ok8JEmiCaKTCQ4PD81CJem1KLQGiN2tFthW6FhLFICGPwWESf3celhRDvBegldR5AZi3TpxmtxKSX8AmM1mZZpLl8dSot1uMkgHS4vE6c+p1+sIygoeEomZpyfn5vLyEnFcM2mrTqdjNr9ms4larYZWq2WiLwCGMEjOEcu0CZXTsaHzzOGzjbd20ICjo2PNuVICRSGN46CH71tvhlKWWhY7zknZyEzoVFa9XoPjOjg/P8OjRw+RJhu4ngtZlu93uj20Wm0EfghT7lzC+K4XwPUCOK4P4Xhawtz14Xrbv/HvrhuUX2WDLc+B6+nUWVQLcPfuMV548Tm0Wg0oBYRxA3GtieVqjZvJFEo5aLd7ePHFl9But5EmGXRX4UqZqlOmHBzHrA86+QAgLaNmExlt40PDeluETBSBwUDV0Fcj79uQj2pqwY6+q06Gnb6g4BW/6EjvpGqseWg7Lfb9Vu/Vni/krdjOg+3Mcr3YTRlt7lWV22IjGrazYfPW7BLVKifDdhZswS/7+fBru3cUO8/Dfo62A2U7JLyGqsDetzr+SAqiWZaWRCTduCxNdUQ7m03R63UBKIyH1+j3ehj0e1CygCOA5WKOKNQSvI4QCHwPULJsLDU1hJP5fI7T01PTwKnf7xu58vOzc4xGI7O5Eyr3fV/nCq2BL4oCUAJxXIPnaY5DGMZIkq2oi74nDclqJyDFnbt3S6GoOparFeZlM7GbsuV6ludYrdfIyiZzcRTht37zN436W17kAJQxzDpVsTbG1lbn5OZMXQRyJ0gQtRtt8UGv12usVisTCdFoh2FoBJSY/iBMTYVREgqpd2H3TCGXwd7MaeijKMJwODTIAA3UZrPBcrk056Vhs1ufX11dGai91+vh5ubGLKQ8z40EuF01w/QOABMNM81CZ42ROhezmdSOY/5GZ4XpDH5Op9PBdDo1vI2iKAyvhRsFz0sHhOmny8tLtNstXF9fo91uG2SDTg6vz2bX2/BlUWzbWZczcLvAhDCt
sisLb4dothuhwGzojPxYpk1HlChLnudGlp1divn86CQRGdFpu9g4wCTntloteJ5n5g7VbdM0MYJqbLDGShlukPwi3EyhNxJ6qUppaylsYVqt/WD7bEpqzZu7d+5BSc25AbQcuG20ONRK6RQBv4gMyoKEPbad53hrIa0oCtBo1DGd3uCtt9/A5eUFCplDyhyaGK4FDyeTibk3fX26GdttJEVtfLb9NLSBK8mhHqN4F57nIPA9+IGD4+NDvPTyC6jXa/A8F34YQzge/DCGgov5fIk01ZyYwWAPd+7cRZpmQCkOZqYZ/xPCECSllMjyDLYx53riXmOTn+0vVuDZc53znQbV5gLwnNVSTH6enZLhXLB5Cvb7q46GneKpGkN+Nj/PNprVdICN0rEtwfa5yveQXrnvcC+yEYbbHIsqomE7ITZ6wTHnHs21ab/PHjP7emyui31f9mGnV+wvG6mtOkM2gvPvDdkgpLlerxCGAa6vr0pDmsP3PZydnaLb6SDwfchCKz9u1mskyQbNRgNXl5eIoxBh4EMWBZqNuplA/X4fl5c6b352doY0TXHv3j0MBgOjsnl4eKTzjyX8ytSB7/tot9uGmb8dSA/X12PEcR1FoXsjaCVGXRUxmUywWC4wn8/gOg6iEr2IazWsywhflukYLVfumCZrxB8fPnyIf/fv/p1ZHJ12Rzs4VsklkYk4jjGbzeCVNfmE9uyeJSwVpGdMAiDTG3bL8s1mg6urKziOYzQPWG5LHYQoirBarXZ6oqzXa8PBIILCUlUpJRqNBnq9HgAYwTDqctBxoNfebrdxc3NjUg+MftkDg/opYRji5uYGnU7HGEcaMiI4TIfYRFGiAjRSw+HQIDdUKRVCGGePBEQ74gjD0Ah5sZke+SvNZhNQMBUrdBDtZmppmuLg4ACPHj0yOhrkqbACiFofLAll6oJrgPNcCAG/TGmt12tkVvkn3ieVwjlU3WQAIMtS45BxTO3GfXEcm/FUShlHkGRhAKZSiOOsx1wrknqeb5wFOnxEu4pCVw7pICQ3pbb9ft8QkqWUJm3F68vz3Dgm3OiYimKK0Hao9HMs+RrC0hhQQL3exP7+AShN7jie7gsCAaW2EuUkq9oGRymgKKTZG2zDJZVEIQs4DhDXIgShj8urMzx48A6m0xsk6xWyZGOex3q9xng83omAc2uTVkpptVi15SPsbvR2NYqvu7uWzoZwFU7uHOHFF19As9koK9gEIFwUykEQ1RA3WtikOUbjCRQEWq02nnv2OeMQVHJQW+fHYUpDVxFxrtloho1A0ADupDf8LZnUdiJsfkDVQagatqrxrKZK+LN9LptTYDsYdnqaqLtvXWP13NV1RoNqp2BsJ9IWH7NThnEc76RcGJAxwHs/58d2Cuw1bl9v9d5s54z7FPdMx9nqijCwrn7Wbc6enVqyUzf2+4Hd5nr2M/5mxwcmiBJaV0oZoSemIhhNzeYz+IGPVbLBbDFHVI8RyBDrdINmp435siT1rZYQvov9/QNkhRaP+sQnPoFms4mPfOQjkFKXav7X/81/g00pntXutrHOtIH1vABKCaxXCQCBq6uh5kRMp9jb0xLKy3SNZJPh7bfvYzrVhu65557FcrmA67pa6XG1QKfdQZom8Fx/B/7rdDrGsHPSsfw0TTI0G0187Wtfw3A03MmPbzaJZkIzUnYcqLwwkSHHivLfNHCciPw3O+vyQXdLOWgApbhThFariflcq3menp4atVIuAJZ6Mi0gpdwhVzK/yLJQOh90YuhAEKqng0dy4GKxwN7eXsnj6GKxWO6gD47jmNQO0zJcmNfXl7hz5w7yXBsqkoSpTiqEMEZSKV2d0mw2MRqNTKrETtvYKSdGHayx5/MJw7CsoukbNn0QBlgs18YYc8PhJra/v4/z83N0Oh3c3NygXd6P4ziaX5TnhnRL54rIymazMYx6lwtVWQ2vHE2edl1XE9fke1mNdjMqbvb8tyrRQXJyiqIwwmm8Z1bKbDYbJJsNwhIpoqw9uUJ0EnXER6XYiSltpmw5N9A4js39x3HNNBtkistxHMNBklLu9L2xI2XNEZE73BZ7Q1VKQUFp7RzHKRGtGoQCnnnmWQRBhNVa85SUUJppiW3libDH05BOyxJaoHRqg3Jj5+arn1+n28bh4R5Oz57gS1/6Is7Oz7BaLiAcgSAIUZRogOtq5GcxX0F6rHxxkUktSgUF5OWz9YIQXslN4XgopUrRLlUiOAp5niCKfQz6B3jmmTvodNqAkCgKBSkFpHDghzHgeBCOB9cLMZktEI9u0O108NJLH0Kt9htI1msEUbybcjApIgd5XuieT2VVmed5cB1nJzXHZ0WjR0MjpUSWpkizzMxpG963bQdfbzsDnMe38RiAbcTPa7edBLvfiZ0G4eeRL8Hgge+1HRr7yz4394btubfmMssyFKIw1AE6mEw/87Mo3mg7Dzxso29/NrDlntnjXRTFzhhyP+a92aX3tqAXdUaqjox9EK2ynUp7bvJnOy1jO9HfzvFHqkZZrZYIAh9CKCwWcwAaBtVERA3vLzcrxLUIi+USaZ6iKHLU4hoGBwM06nXs7+3jxZdewvHxEXp7+3A9rYK3v78PIQTG4zGUUmi1WjqaCgIoKIyGI2QyR6fZged4qDVj3NxMkGUpPOHi8YPHSDYb/O7nfxdZmuHx4zMslytMJhNcXV3iP/1P/zemd8Bms0IU1TGbT7FarxDHETZJgkazieVygbgWY5NodCMrvUXP9+AHPvIyTy6lxC/90i9BQBiugyxkyWnIEJUoANELRogATLUIqzY4YUh+pIHI89yIJxH6ZiqDPAOn3BjsaHY6naLVauHi4sKUYNJhYefa8XhsFE052enZ0uFi2ofpBpbORlGE9XptHBeWXTabTUMCtT3l8XhsXkuHq93uGMdiPB7DdV1zn1xg9phRBpyN4ej4ENYHYCJuRuLcdCg+1e12EYahIX8S8eHiZdksUyl04o6Pj40Rd1xNMrXTNUyd2JvLDnkOOmFiRyg8vp0Fa0d/3Ij1ZrL9PTcux3F2iMJ0lO0yPDoh5HdskZKsVFrMzLNaLZcQZbREvg372wQlT4kGiGgG75vn5UZmR1W8Fv6OY1fdxAj3F2X3ND2vXAjHR78/AODAddwS2i4rmJxttFaN0nY1O3Spsx7TbYWB5rfEaLWagACePHmMx48fYz6boshTeL5rziPLdE7gh2AVDaBRE6kAVaZplAQ834PnB/DKqhwhBByhq5O0oyFLOXOJKA5weDjA8dE+BoNeKQaYldyUktCqHKB0NIS7RpHlmExniKMId+/eQ7vVwcXFBZySmFydanYeP89ynVKCRmVcy8ChnLvkI3HszNyszGWOezX1Z88NnpuOp026rF6jHU3b64B/42faSGj1enk9XCN0WPj6qtAX90QaYx6e78HzxHscBjoy9j5gOzB0auw0vv0MtvNxVymURzXVVHWYeB4blbFLZPlZfC2v1UZW7LHkc+HYk7tG9PHbRTWAP4qCqNTVKFTq3Gw2JRwfIE0TLJcL9Pp93D3aRxiEeOmll3B4dIgXXng
BR4dHaDQbGgnxfLiOg/F4hHqjBacstWJzLGpOhGGoSzL393F2eorBYIDFfI6333gbRZHjq6+9hjiK8NWvfBXCEbi4uECapFrXwPWQZJoM2h/0MRrdQCmdq81zjQpcXl4ijAIoJbFcapLl+GZsiKpxHOsOmXGMJNUQv1MiIovpArPZTDdW63Ywn81NDlijJhHWi6WJltNim1qI43gnz0njBsDoDNiTbLFYmK6WhPsJd+v00r5pCJbnmktyfHxc/u3QpEyYQjk4ODDlo8aAlhOXhsSGAoFtW3men9oN7FWia8d3y7XIGSEaxOu5vLwsHY8lfN8zZbF0tmgoqxsDHR7yWZrNJoqi2LYEx25fAvIphsOhISrmeW4QB46pC03mTbLULCZgK4SVZZlJNdRqNcwmN3DFtrFUo9EwBEw6GFJKM3ZKwLD7hdBNtrbIiZX3FKWuwi2HvXnz/DpHmxllUVvMjePIsedYMHXCjZXPmnOMrd2F0A4ty7LJF2IVC1NqbOClx8ozjksURViXzRbtDcyG5W3kwo4ub3O+siwDhILjboWYWq02Bv29cs2FKAqU7dsdM676MzVB0v7de69Fd1HldQWBTs12Oi2cnj3Gu+++jcViBiEUHN/VnI1ClZiJwnPPPYePfvSj2D84wpsTF8jLCFXp5+o4Dlx/C11LqXY+XwjA9RwIKVFIiTgOcLA/wL1n7qDVrMP3vNKRcaCkgyKXyHOBTCoouPCDCK4XIE02WK03mM10k8S9wR7Oz8/hul6JqEmdUrL8LiEcQAEZyybLNIHtnBIttJ1IG1WgloZtgKqog/3Mq3PbNrK28eSztw1hNbXD39Fw8jPtQMV2NOy9lQ6OzZXg+rCv3b4vjSJukR/7/dXUDu/H5lHY120jQDZiwc8humE7JPa57DHj99scjducOBth5H7FdGY1rUOni+PMVNK3e3xgZ4Mfxrz8s88+i8FggJOTE0RRhIPDQxwcH+Hw+MgYA0b2URgiLdMHi/kctXodzXYbm5VuS//o0SN4noerqysopdGSyWSC2WyGd999F9PpFJPJBALAdDJBXKthcnNjFsl6rdGVJEkQRhEW0zlqzQ6SNMdoOC51D+aIohhAbjbLKN4SXZkyYD6ZRpBGh1Uxo9EInVYXv/ar/xZnZ2eoRxpO09GjhyAIofPBZbVCHMNR+oETNSCBkEiGPaFZScI0ALkPnucZJCBJEvR6PYRlnxlWctCpYHkWybXD4dB0OyV0TQeCKQkSAlnJQaPBKJgwPRGWdru9k145ODjE5eWVIbYKofUdarWakZenhPm2KVtiroVOlh2R2k4Pe9EwtcI8ZRzHpoSSvA++h/fEvggcczoejuMg2ejPny8XZmHS6282m1gsFkbQbDgcYq/X01C60DLRNO724qSzo5TCarMGrOjPcbZdRoFKWZkQt/ob1VwzD7vCiegF5w55NUxlNJtN42DZyrV5nmMwGKAoCozH49JRSZHnAo26dqSajYYZCxogfpbmGQFCFDssfrdMQVYjIG5efMZ0nhjd2obFPmwC72aT4tln+wjCCEUu4XoOIBllOqVhLQ1p2V5elfwNmIZkFP8iF2HbZKzRaOD45BiOI/H1r7+O+w/eQZKuIfMUwnEhlUIY+Lobq9JOo14LW36EIxwIR5+X/AZga1SALZFQSgklMwiVoNNt4uTkECfHR6jXIwidRILu+OIhyySSjURWFEizHHkh4bi+RkuEg81GE9ub9QZefPFFvPHGG5CygKPK7qn2/BJa60UbzHxnnG0eBOe0bfD4d6aK7ei6Oq9ppDiH7XnA8ah+8bARjdsi/Wp0byOHdiRvr037823nlwbasxyuXVTMGroKonDb/dqoiO0ccB3b92anoapro5r6uc3h43l5bjvdVXXkbMeBQSSDPNsxs+coOXTV8uNv5/jAzkar3cJ//Xf+WzQbDUSR3rDrjSaCUn0yyzJEjRqG4zE69TbWmxU834OCxHK9xKPHjyEAPHr8uKxICfGN19+AlBJvvvmmgdHPzs7Q7XYxmUzQ63YxKXP5yXqNzXqNVqOJ08dPdF+KUkbaUQoqLxAHIdarNaIgwHQyxfGde5jPdVTa7/dx+vQMd+4eGVhIKWlK/9KSUzGbzdBqtzGZTqCvHtjvD7BJNaIglYLne/j0pz+tYeokM5UYqwUbt+WmLDgIAsDZQtl8oDoyzQ3TnxOfzgh/tqtIgG0+cjwem5QQI9vFQvNR7OZZUkrs7e0ZrggJoev1GsC2a61SaifiZVRPo28vrEajYRRKb25u0O/3y061ugFXp9PB5eUl+v2+6cdCg02HKAg8IyLGz2VKzt7MSNJkDwXKpfM+yA3h4gK2GybvZT6fG0eJ3rwtp+1722dBA+l5Wi2V0txseHV5eYlet2MWqP3M4jg2lRp8bgDFpN7buto+NK5xO7ph58y5wfNzgS3yxGfIMaJGCFNHjhDolAqns9kM3W7XaNx4nmdQGp2PFsgy3d11uVwaIS0ppVFLpZO4WCwRhrG5TqbzGJXy4HOtohvAtimfbaz0OOneIa4rkKQZPNeFkgp3794tX7d1JCgIRj4C5yw3xSpELZzyq3RKpJRoNOo4ONhHrVbDo0fv4o03voEnTx5jMZ9DFgV8P4CUGRQUsiRBs9nGYDBAXPLZwOuHVl12xK5hsrkDunRWoypR5KPbbuPoeA97e320WnVImUMVEsLxACUgCyBNc+S5RFYoJFmKJMuhhIDrehphEA426w2GoyGefe45NOp1rDcbKFdqZKT8z1yk0EJVeZZDKj33pYUI8LDz9VWehD2uVaPL9XibRob92ts0SmyjZ6cJGQzYn8n1b6cy7eu0513VabHTm/a5zf3Bvr+tfgcRFPte7f2D6xXADqmWCDHtZtXRqI6xzamwx8bmuNiB2vs5ffa/q3+3nT7737y3qoPzQUpf/0hy5UdHx2jU63A9Xbrl+z7GN2NEUYz5fIHVZoPxeITHDx9jvV7jyZPHuL4e4urqCm++9SYC30eW5bgeXkMVCs1aHfPZHPUygmq124iDEHmSY9Dt4cGjh3j22Wfx5htv4MXnn8dyNkerVkccBFhMp2g1W7g4Oy+dkY0mUQUBXM9HGDfw5PFD7B/s4+L8DL1uFx/+0Ms6RePHuBnfoNlsQBUOihzwvRCj4Q329geQUsH3QqyXK0RhpLtahhEcOGjUaviNX/t1LBcrzG9mlqR3F2GoSuMdYLac6iqBNIVTEsiEAMIwKIk7WoNCLwKdh2VEyslna1LYhDpC+pwQOkWyQRhGKIoczWYLQjjwPBdXV9eo1+slIpUbZKDT6RhxLHIeSDRkdQm5CVThsyHDXq+n8/al7kWrpcmbx8fHuLq6MmXL1FEh2ZUVIdOp7g6qy/UcLBZLHB0dGqVVDWeHZU48wnq92oEGiSowxWJ78USlbO3+zWZjNEb45TiO3mzLyJpN8ugU8popRjUej9Frt1Gv1w0Rl2MxHo+NU8gIIAxDyI0qc/IFlNSRpS49dCDUtgzREQ4kiq0h0NtKqfKou4Bqh5QR5vZ5sByaaTCWuFKhdbVaYT
8A50rpEXGaA1zs/OcHjrAKs0tR5RgJ/7uZ+zFRxj/OD7P0CSJBgMBvi1//DvYXtn24rH5LYCJEKaGnEkVZSIZYw//4s/tx1kp6jVzOFglEJNhN3t9jAeTdBsGbEqlrwyv+SXRrJ6gxUnANYUH7U2+gnj8RjNZtMZVz58chO01k4rA6j6RBDeZ8mrf/j6ugR0Gnz9jyRJMJ/P14h4PAxHoxE6nQ4AOKeFDsqtW7ecwNX5+Tnu3Lnj+oo0Gg384Ac/wK1bt1xb9rI0TeiocVKr1XB+fu6MPgAn1815IC+F2hJbW1uYzGZotTuQNodK54uHOueajgejR84RDzqmjOhkxHGMWpKgaxu1dTodV4r8+PFjF+HSwWG57GKxdAgQD9Q8y1EUuateIMRY2sqZTqeDy8tL1JLEGLr5HOfn57h37x4WiwU++ugj50RQaZZoz/7+Pq6vr10VDoXK8rJwm5uHFe/fPyB5iPm/26ym+B9ybDoavC6IdeeHBEWNj3dEhDCdfiEMx4DrIwxDJyxHJGw0GkFrIwc/nU7xyiuv4OL0zCnAXl5eoixLHBwcYLVaOYSKiOOd27fx6PGJI1L7xpdp0TRN0e12bUO/S0wmE/caVp4BWHtfEAS4urrAX/7l9/Hlf+9vIYpNp9gizZDlOUpdAKp085DnGbLAlP0HYYigZH+PAkLSsEfQQqBAAZSADCVUWWC1TDGZTFFLaoCoA3FgDZKuiLgigBTVWeI7Ebx2imEFQejSm/4z4XwwACCPRcBUboVBiN7+Ae6/cIhWPUIjCRFGBnYvoR3PQtjvo9Hy/2wOadeBz6HgH3+PkBDpO64+1wzAWvBn7sk4GtDSpZT897m/y9L8npwiPJvIzevT3rwZgmyEQAZOk0IpI4Rlzhrj+OWFclVI0ApFniMKQyyisEIkAMPHAFDYQMyfA98h4Jz4kgxMVdFRp5YU95Zve7i2aZv8PwAcGhtIEl0BaIkyLxCKAEEcIBACZVZCSpNCypaLj93za3P4qV7lDR4+f/RHf+SY/H/v7/09fOELX3CVCrklU1Jd886dFzCdTk2apCyxv7+P119/HdfX13jvvffwwQcfoNEwqnxf/epXURSFM7B+GepkMkGe5ZgvFvjOt7/jIpzRaOg2CwB78M+toV6s9QihYaGBJCxLp4MHn68Cyu6dhB+pBcDDiOVBrKTwWdObMLlvVLl4/AOAvAJWh1A0hQaa8Dw9WZY6kYnPJnnz+Rzb29sYj8cuNcPSUR7SAFxVShzHePvtt8GW80AlKsRU0Gw2w2AwgFLKGWoaeKDqIsmqGr//Cw2+855RwZKcRxIrufg5py4ysxUnVDkVQjhDz43GZ+pXqfAwJeLQ6/eglGnSxk1Wlqb2X8NU+uzt7WF/fx/Hx8coisI5bUopvHD3Lihi12w23WGws7OD0WjkyLAkEVf5Ul+ls4KNnxW5Pevv5+Fm/E2OzevznSD+LaVE4K1t/x79+4AwWhSMwoxCZ5WuK8vSdU7mATmbzfDOO+/ghRdewN27d3H37l30+31EUeTaHhwcHKDX62E2m7mqKpYub0bEdGirFGbg9HA4/MoGH50xiFWOt3/0fYxG10iSEK12EyIMIMPIln5X1RBlXmCVrpBlKUpvfWZZWhlLF5WydNj8Wa5SjMYTTKYTLJcrFEWJUhuEIy8K5EWxxs/gPDMY4R+ikFG0DrH7UbKvGePSv8LoZ0gp0W61sbOzi063hySpWyVno+WQ5aabLdPLaZq6vcu9zD3i1tPGuud10KGryJ7SEFyDdbFEPks/PeY/K2k/M5CyIoDqjTSesMXHz0ih+DwS90dXzoaGqRIxvDwjmAbAppwSRFGMMIzWKmzIZ6GoZavVcnLrQhjHjt/l3wvPWToPRIH9Uliel7PZzDmNlaIq1j6DGhx+YQTtiX9OlYUye8Oey0a7RUOoErosgLIEyhJl+ulEvZ7b2TAEwBjvv/8+fvCDH0BKU0b6a7/2a+7GVqvU6WlkWeZKJA0JETg6OkK/38fV5SW+973vOa+80+ngM5/5DJRSruW6WbyZk2ANoxAX5+euzNFA46bqgka7Xq8jXaXuwOdCZr241tqRQXlt9AiJdvjepM854M/q9bpDLbiQhBDOQPPa+DA3a6T5+XQeKARWq9UwnU3dz5iOoMz7+fm54S7Yst3ReOSci62tLdfqnt6t1hoXFxcuWidy0e12sVgsHLeDZZvD4dAtyFqt5jYGDSjnkMgGG8cBwGAwwIFtFe87WiSuEn3h2vArc/haVv1wPulpsyMvjQCfB3U3VqsVer2ec0JMJ8PKaSERWCmFi/MLrJYrLBaLNWNeq9cQSIlXXnkFw+HQVQAxjUYI+vr6CovlAi+++KJLodEBYhmtnz8nKsWfEaHivT+NbFSttZ9l3DfHT0qvNJtNJ8G+vbVlunY2Gqbnj9WdYW6YyBOdQ6JZYRgitE46S8nDMHSHp+84OiKgZ6Q3jQ3ngFwC7i1+Lu+LRmYyHuOdd97Bzs6OM56dTge7u7vOCV2tVk7f5/j42HX2BKpyc14bEZQkSWxL9IpbRQeV+5cGyEWERY7peIA//dNvIi9sY8JGE/Vm0+iCSJYbAtAKabrCcrXAarVEnqfI88xTcLTVONL0JREygIapMDEVWJlxOGZTFLZCIy9KSxJdl1z3y9b9s4+/J2GdfA0aFp6DdPg490prpFmOIAgxmU6xWq6QpWZ9L2yAlq6MsJnW66gbP5dryl/nvgPI5831QsjfX0fPckh8oj3XlO8gCjOLBrUoS+giNwZSlRBamd9Bw3RwMc9Jq9KwcLWCNPxlCMvMVaqwVTjWDqoSRZGhKLK1oIE8lySpodFootlooF5PEIUBBBSiMEC73US9nqDdaqLTaaHZqCGJI0ShkXnn/uB8UcjRv09qYpDvxuaUdAr8pmlKKae6zc/lZ1Pugc+cwXYcJ8ZZCiKDcNgCsjgKoMsScRAihEA6nxmn41OM51cQFRJ5bozMv/pX/wp37941LPAvfhFvvPEGvvUn30IU2Jr4piFrHh8/RrvTQqlM2dsbb7zhuAXf/va3Ua8bpODnf/7nXbnn7u4uAEJy5qAYDoeIghDvv/8+Fos5Op0uRqORI2hy8U2nUwhLuKw3Kn0Mwk8+ZMdNSTETPy1ADQwaR25iHo40Ktwo9DjZzIsbiwcVUwY87BKrsknjSOLqvbv3cHl56UpXGfErZZRVJ5MJGo0GLi8vjXyyJY6ORiO0221nJD/88EPcvn0b1J4gYXR/fx+np6fY29vD1dUV2u22uxZ2LlVKOeeLlSo8CLTW2NracouV/IRer4fpbIatrW2cnZ9hf3/fOQwcRCnG4/HaocN7lFKuichwzkjG3dvecigZ0zNpmmJ/fx/j8Rh7e3tOk6PXM6qp9XodMghcNUleGBXRTWOSpqZM9oc//CE67TYODg/x5OQEUkq0GsbA5lmGUEj0+328/c7b2OpvrZGzuNE5mAPVqhJgexZnwx8maqlQJa6d5x1CVN0ffUMUxtW//Y6v/H9fcCgMIqSpKR/f29vDZDLBeDxGv99fSx1ubW2
h1+thMK4cLr/XkFYKEJXMOR0VH7lkipMHKp9NYSP4k5MTNJtNRxBO0xSnp6dr+4TVViR2djodnJ+fr0X+dGpWVqvHoGaxc2J5VnDv+s9MKQWV51jqAg8fvocgCPC3f+GXkNRr0LYM1X9eRZ5hVRgydSClbU1vVETNNYYIbBrFoD62FFXYtvRKYbFYIkliNBtNCEF4XEIGoQ3S11NsnIuyLN3eNOXflSATU1gM0gC458XPKssSYRAhDAIsZiMMhkPsbfcQQEArI8lO+sXmutskbXLeK2RXgsJr/mv8MkofFRbBOhJSVc2sOzlaa9jpg2D1l14novJ11VDwS2rNozMl0+Z12pKg/X1bQmuWq1dy4uZeQgSB7WSrjGqrgEZZmnuTUqBRr0MlMeI4RGBVb+ViicmkSkn4c8Kz0Cfa+w3jfJSHe5hOnn99vEbaKm3PQaDiMIVhiDzNUOYlBEx1WS2pIQ5M+XUtiqDyDNlihmy5NBL5n2I8F7IhYGD+3d09lGWJJ0+e4Bv/9t8aZCDP8bnPfc6Sg4SLNoIgRBTFWC1Nh9dut4c33ngDnU4H77//PgaDgRVE0abxmo1sCYeaNIFpY9zr9TC4HuA733kTRVFgNp2iLEqjlyEDpKt0bdLjOHYkJFWqtcXvD/cepZ0xpVgVjV5Zlmt7qsgrhTUAbmPzQSexqTNPrOfpf2+R57ats3bROh2OsiwxuDbKlPOZSd8wzcHDgAclr7FeN7BmFBqOAMW7aIBbrVaVerHOQ7PRwGw6cw3z6NXOZzNsbW2BxNEwCJHYiEcphdCSMqnrkaYpZlZanGhPnmUGzbBpsMFg4OYfFlXiIa6UsmqFFcLAeWJkZFjQBhlZrVYQtqPnwcEBtra2UOSFQ1NWqxXyzKAn0+kEsMZwYkmlRJmUquBd+8Xueu7cuYN2p4MnJyeOwyGEMM7O/r4j3b7+2utYLpeuLJkHoH+IBjKAFNK1//aN2CZ/g/e/Nhcbh80z9+Un/FwpBQFzuNF5ZnRY5LkTM9JaIwyshLKDmA1SEQbhmiOwu7Pr0I4gMCqFRjbcNmQrFUqLKpWFabgG75BjntlXlg2CALE96KIoqqT2RdWsLkkSfPjhhy61NplMzJqwUVu73Ua/37cpQuEUaKVnpNiOnXt6tVoZIrWNiH3SorTzz+cAWDi6zJCmcywWU/zlj/4ST56cAAII4wiRNerSqINBQyPPVlitllgsF259lra3SpFbZUgtIIV1RCSJo6bEME0zzGZzjCcTLFcpICyp1OpJ+I4sHTmiM+7ZWiG0PDff7cPrfsko914gAwhIZ3zKUmEymbl1bObEyJsLa0aksKk0WUHxdC74MwqQBTYw5OeH9mzadFTc++TTkvNBEDgirnMkNHVdnuZe+Gi1WZvkBVrRLEve1VoB2qRJtA2QNZEPO+iEUDjOOB2hnf8IcZQgiVnyGyG2FSpsUiclLL8jQqNRR6vVQKfdQrvdcr2rfETI57NskoBp73heUo11U+Gac8h9yGCbyDyRwCp1FRmeT5ygWW8iCkLUkxqKLMPZyQmOP3qEmSXtf5oh9E/CYO0o/uf/DFMZYSUDBMXQ1LHDHEatVgsaQLpauY6k5uFqsN7eh2+6vR4CizzMF3MoZVTVWi2jHKi0qkqfhXAqhKaToMnp5lluFhTIKQgcAmLVhjcOYeoFVLoBlRKAdj82WTyn7+Je5/9M2B88rUWwqcHP4nf/lZ5hsWqK/jXz9UJKB+2qUkEGhhhGI6C875G274CAaR0dBqEhdoWVTK1W5jMFKkhZKfO5jMT8CJc5W+1tWq21rZG3z0fyALaiNy4arWq8AYFSlSb/6+ZUr82a/ySq2QYfiLl2Yf4t7c+kkE5kx0Rjeu3zn05PmM2tlCmR3jyI3DcKgTiKYOSHSwSMHNw9Bg4yDoIQeZ5Vzqh1pp7eURra/xJvFbi7fcZ71ueCr3uagvlJmIebByE+dpb9z9n8mU/YCySrBOy6xdN7u1TrLefX1rvZ0BB2n/kXvrZX7QVuPiNDQASC0BimoiiM8dNwhGTYSL9UVdmraQOu1z7bfgFgCet0MohEPXNW3e+qa9YQqCU1hFHkvltrhRxNaEhIvYTUqb1vw1Uwe6EiSD7tLOqNB6GfQgCEu8ZqF1XXVe2DijOzTpLkXtfQzqFTSlVnoftm7c4MpxbqrRR/Pz+FcGxWMfkvER/3qo8f+hn/585o8UmftP7aj/vET3MBcmIQx8v/68tQvRgChqMTBCGCIIQUlRy6eU8BpTLL08kcj4120k/PZYXGKisxmy9c2ng+nzuE2Ucj2EcIqLqWO4daVd1mfWdksxTY59TRuSGSmK8ylLmhTCQ1g/rPJhMMri7x6IMHmFxd4tbeNu7dPkIUCvxv/+//+CdO33OlUdoqR1vlAOrVOxWAiYGNIwRohe2f/EFT88AaEGjIVoWvrACsMnySHlkAIEENiGrPc+n/4x7PnewCQAVZufG3/+/gGb/Dxu+ed/xV3/fXGc8zP/JTvv7T3kdcf77X34y/uRF9ytf9tJ+NBuAyhQJmkaXeC5LqdWuIs4XoP/WX/PWrkf56U/E3cw3/Yx/5qy2ErQSIjF6KKa+t+FRlWaVlpGCpM9bkEdaceItGxQgQZobDQ0fbR3r5Pp+/5QeC/GyfEO87Fz6f0EcU+TtfkyMQIYQAJpMJiuEIi8USZyfHOH1ygvHlOba7bfQ6HURBAKE/3Zr41Mf0H/+Hn8e//IvDT/vym3EzbsbN+P/7cav8JuLlj5AXBcpSIYgi1JIGkloN9UYLSVxDrd5AktQQhpEhCMLyVCx8X5a5IStqjVq9hq1+D61WE0kSOz0MPy0X2FQMlZONsTGlqhqmhHMynVgSudGgoPaEgMAqNR2SZRAiTmJAFZiOB0iXM9y/exvtRg0iMEgXJCz2oaGV6e7qDK9Diq2+xlpfFoN5BS59ZQ0mNiqv7M/sb1D9WKMsFfLclmoGplWB+UaFgOkNj9Ni5tNyOLTVg9GGBGqQYTi0SUrp0jrwUAJAQ92uI2gn5t4sb8WkaDRs6143nxIlNKpyZJ+U6/O7Ki6TdKgFUQYi1EyfkFDrV/r4aU7202Jant9JhIRoF9ESoKIBKKXcz1arFa6vh1itVlguFjh9corz03MEOke/37cyAwUatU8X+H/qNMrNuBk342bcjJtxM27GX2U8d+nrzbgZN+Nm3IybcTNuxvOMG2fjZtyMm3EzbsbNuBk/1XHjbNyMm3EzbsbNuBk346c6bpyNm3EzbsbNuBk342b8VMeNs3EzbsbNuBk342bcjJ/quHE2bsbNuBk342bcjJvxUx03zsbNuBk342bcjJtxM36q48bZuBk342bcjJtxM27GT3XcOBs342bcjJtxM27Gzfipjv8vDExPtj63TZkAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = run_image(runner,\"dog.jpeg\")\n", + "sv.plot_image(img)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "013ebfb59e88443d978bb2a4f3a68f96": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "0381e7fdec3642d7af08a11841aaaba4": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "06c1c81b5e8544d8aaca394f2e13539e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "07cb92c22899453291baccd1f9b11a49": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_cbc909708fca4191a80767479a9c9c55", + "IPY_MODEL_152972aaf5c7433da0a7ce4889694cf4", + "IPY_MODEL_b769fadb878c43beaec040a779ba9067" + ], + "layout": 
"IPY_MODEL_483f26b6d2e54bb581e8a6392b8e1b39" + } + }, + "084791b432c64ea383eeb10dd912d27f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c7e34cc6b3b54c36933cf4b21f32b469", + "IPY_MODEL_961b3186964b4aa694ed50e601ca6ea6", + "IPY_MODEL_9c7aebef36c94f659420f35c6951ac14" + ], + "layout": "IPY_MODEL_0381e7fdec3642d7af08a11841aaaba4" + } + }, + "0aafe16d6e6d4561932cba3bed69f562": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0bc8d02b9b0941f8b38f822b8552e54c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c5081cf89abc4514b81b0a705850b26f", + "IPY_MODEL_93a7172913a84728a2919fe8796567c0", + "IPY_MODEL_c50ae95e956d456395d05f12367ff8e3" + ], + "layout": "IPY_MODEL_c80456ab37c844b1beb074e74b17d8fb" + } + }, + "0becbcf3af914252b73937ffd789c533": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_52d5fe0cd2514f87917ab8bcf923becf", + "placeholder": "​", + "style": "IPY_MODEL_0cee1b12a94c4fdaa97d7b0e57a9d8f6", + "value": "vocab.json: 100%" + } + }, + "0cee1b12a94c4fdaa97d7b0e57a9d8f6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + 
"_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "14b64b065ef740cbbff5587f062b04a3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5ede178010f54c259c9802698a599664", + "IPY_MODEL_225ca87fffb54bfa9514513ace1fdbf1", + "IPY_MODEL_cd906068e1cb46e4b5b62fc6267e8e6d" + ], + "layout": "IPY_MODEL_0aafe16d6e6d4561932cba3bed69f562" + } + }, + "152972aaf5c7433da0a7ce4889694cf4": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e908586e492443c6a28ed16750df6748", + "max": 2224041, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_013ebfb59e88443d978bb2a4f3a68f96", + "value": 2224041 + } + }, + "164ffff1e1944183b01d8cf76541556a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "1745520fa3834cbf900b1646fec5d6aa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_768b536c12f84b1cb24d38675573baa2", + "IPY_MODEL_569e8aabbcd74e4f9288bdebeb91400b", + "IPY_MODEL_ad5431bc98784ee7adcf489989aba432" + ], + "layout": "IPY_MODEL_8614da2bade94ade978fe71994c777fa" + } + }, + "225ca87fffb54bfa9514513ace1fdbf1": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d839228be8b84096a587489217630b7f", + "max": 605247071, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_b76961c341d64959ae6ed7ad40f6abab", + "value": 605247071 + } + }, + 
"265d430fcc604c6984d70b7e63f11e37": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2d181d3861c64d0c9d71331751de111e": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f9ecf05660fa4512b4ff4cbb9d30f3e1", + "max": 389, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_898c2d408c0a4b34851f7fbf537f45b1", + "value": 389 + } + }, + "2f5098940d27496983565ddb3ab158bd": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "309c33ce179144ac9b23d6396f2fdcd6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + 
"overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "32b452668efa4b61acacd04d289edde0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "32f222c92f844a8ea780960c0e25a64c": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "46da2b5501cf471a99f354f17e85fc1d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "483f26b6d2e54bb581e8a6392b8e1b39": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + 
"width": null + } + }, + "4b48981f033a4e0b89b3dc1cd088599e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "4e47a4bc196e44dba1d7ce4faa5b74af": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "52d5fe0cd2514f87917ab8bcf923becf": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + 
"object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "53a11753fc664f12942c0a5a8f62e695": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "569e8aabbcd74e4f9288bdebeb91400b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_cd8f2fffa9a845cfbc2ce664647acda5", + "max": 4186, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_32b452668efa4b61acacd04d289edde0", + "value": 4186 + } + }, + "5dbdd01ad0bd4939937fa32eb32182a1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5dfaba276a3c480d837a75767300e96f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5ede178010f54c259c9802698a599664": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + 
"description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a81ab5c22fdc4ea99ebe396d3b43c552", + "placeholder": "​", + "style": "IPY_MODEL_8841ee0d44fe4073b3dc5237c8045185", + "value": "pytorch_model.bin: 100%" + } + }, + "6113de583b7a4a22bbbbfcf9a0ae6ea7": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "614d44b9730b4fe9a01305ac6c822388": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "768b536c12f84b1cb24d38675573baa2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6113de583b7a4a22bbbbfcf9a0ae6ea7", + "placeholder": "​", + "style": "IPY_MODEL_164ffff1e1944183b01d8cf76541556a", + "value": "config.json: 100%" + } + }, + "794250f1a0b44831864f487cfe4be7b3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "7c53e4cff8344da8858060970b931a80": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, 
+ "81df29145f4449339e75f78919147899": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "828a59ea87f34d4f8be9fa6fb63fe991": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_0becbcf3af914252b73937ffd789c533", + "IPY_MODEL_8dc08812835f40e9a85c73ea57710029", + "IPY_MODEL_bd6743fab19a4056a741fb923f1d66c6" + ], + "layout": "IPY_MODEL_cfc1570a53d4467397583e5614f35515" + } + }, + "8614da2bade94ade978fe71994c777fa": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8841ee0d44fe4073b3dc5237c8045185": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + 
"_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "898c2d408c0a4b34851f7fbf537f45b1": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "8a23897839594ba4827c5a34463dbb35": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_ce8d0eadfac444a6b88e0ba16ab6f3f9", + "IPY_MODEL_2d181d3861c64d0c9d71331751de111e", + "IPY_MODEL_fd9cc05ff50e4463b004cacd050b59c3" + ], + "layout": "IPY_MODEL_dedf6f98735643d5bb53ff2e874137c7" + } + }, + "8dc08812835f40e9a85c73ea57710029": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ef7a3e2a70624fdfa2d590635e962ffd", + "max": 862328, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_794250f1a0b44831864f487cfe4be7b3", + "value": 862328 + } + }, + "93a7172913a84728a2919fe8796567c0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_32f222c92f844a8ea780960c0e25a64c", + "max": 568, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_06c1c81b5e8544d8aaca394f2e13539e", + "value": 568 + } + }, + "961b3186964b4aa694ed50e601ca6ea6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_309c33ce179144ac9b23d6396f2fdcd6", + "max": 524657, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_dc6812fd13504f6bae35d81aaf2593fa", + "value": 524657 + } + }, + "9c7aebef36c94f659420f35c6951ac14": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f7463653c82e41b087e794191e70c43e", + "placeholder": "​", + "style": "IPY_MODEL_7c53e4cff8344da8858060970b931a80", + "value": " 525k/525k [00:00<00:00, 28.1MB/s]" + } + }, + "a81ab5c22fdc4ea99ebe396d3b43c552": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ad5431bc98784ee7adcf489989aba432": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e46b4e1e95da4d6f924a851265403480", + "placeholder": "​", + "style": "IPY_MODEL_ee06192a75fc403ba6d945da2efe4317", + "value": " 4.19k/4.19k [00:00<00:00, 161kB/s]" + } + }, + "b2dd4e48fb974451979e37fb99bbdf5b": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + 
"object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b69eb52454c64fb4bac7c9f008241d24": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b76961c341d64959ae6ed7ad40f6abab": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "b769fadb878c43beaec040a779ba9067": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_265d430fcc604c6984d70b7e63f11e37", + "placeholder": "​", + "style": "IPY_MODEL_f55df7a2f0474b5ab6d0a23bcedf8cc2", + "value": " 2.22M/2.22M [00:00<00:00, 8.62MB/s]" + } + }, + "bd6743fab19a4056a741fb923f1d66c6": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_4b48981f033a4e0b89b3dc1cd088599e", + "placeholder": "​", + "style": "IPY_MODEL_46da2b5501cf471a99f354f17e85fc1d", + "value": " 862k/862k [00:00<00:00, 1.24MB/s]" + } + }, + "c5081cf89abc4514b81b0a705850b26f": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": 
"1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_4e47a4bc196e44dba1d7ce4faa5b74af", + "placeholder": "​", + "style": "IPY_MODEL_d0bad9ce27a742a49667d1cd58eea350", + "value": "tokenizer_config.json: 100%" + } + }, + "c50ae95e956d456395d05f12367ff8e3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_81df29145f4449339e75f78919147899", + "placeholder": "​", + "style": "IPY_MODEL_614d44b9730b4fe9a01305ac6c822388", + "value": " 568/568 [00:00<00:00, 24.3kB/s]" + } + }, + "c7e34cc6b3b54c36933cf4b21f32b469": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b69eb52454c64fb4bac7c9f008241d24", + "placeholder": "​", + "style": "IPY_MODEL_5dfaba276a3c480d837a75767300e96f", + "value": "merges.txt: 100%" + } + }, + "c80456ab37c844b1beb074e74b17d8fb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "cbc909708fca4191a80767479a9c9c55": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b2dd4e48fb974451979e37fb99bbdf5b", + "placeholder": 
"​", + "style": "IPY_MODEL_53a11753fc664f12942c0a5a8f62e695", + "value": "tokenizer.json: 100%" + } + }, + "cd8f2fffa9a845cfbc2ce664647acda5": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "cd906068e1cb46e4b5b62fc6267e8e6d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_df073637968a4ca499a861f74869d45d", + "placeholder": "​", + "style": "IPY_MODEL_2f5098940d27496983565ddb3ab158bd", + "value": " 605M/605M [00:02<00:00, 182MB/s]" + } + }, + "ce8d0eadfac444a6b88e0ba16ab6f3f9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5dbdd01ad0bd4939937fa32eb32182a1", + "placeholder": "​", + "style": "IPY_MODEL_fd7d351c2a5943cd9934b36be67481ca", + "value": "special_tokens_map.json: 100%" + } + }, + "cfc1570a53d4467397583e5614f35515": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + 
"justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d0bad9ce27a742a49667d1cd58eea350": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d5797b57dcf04274a5f7077d104a62b6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d839228be8b84096a587489217630b7f": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dc6812fd13504f6bae35d81aaf2593fa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "dedf6f98735643d5bb53ff2e874137c7": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "df073637968a4ca499a861f74869d45d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e46b4e1e95da4d6f924a851265403480": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + 
"grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e908586e492443c6a28ed16750df6748": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ec8e16b5e78d4c55b100090ee7e23ddc": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "ee06192a75fc403ba6d945da2efe4317": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "ef7a3e2a70624fdfa2d590635e962ffd": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + 
"left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f55df7a2f0474b5ab6d0a23bcedf8cc2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f7463653c82e41b087e794191e70c43e": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f9ecf05660fa4512b4ff4cbb9d30f3e1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "fd7d351c2a5943cd9934b36be67481ca": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": 
"1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "fd9cc05ff50e4463b004cacd050b59c3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d5797b57dcf04274a5f7077d104a62b6", + "placeholder": "​", + "style": "IPY_MODEL_ec8e16b5e78d4c55b100090ee7e23ddc", + "value": " 389/389 [00:00<00:00, 31.4kB/s]" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/models/YOLO-World/demo/simple_demo.py b/models/YOLO-World/demo/simple_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..fb797835db5be63d50cc5e213662d0039ae73cc4 --- /dev/null +++ b/models/YOLO-World/demo/simple_demo.py @@ -0,0 +1,61 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import os.path as osp + +import cv2 +import torch +from mmengine.config import Config +from mmengine.dataset import Compose +from mmdet.apis import init_detector +from mmdet.utils import get_test_pipeline_cfg + + +def inference(model, image, texts, test_pipeline, score_thr=0.3, max_dets=100): + image = cv2.imread(image) + image = image[:, :, [2, 1, 0]] + data_info = dict(img=image, img_id=0, texts=texts) + data_info = test_pipeline(data_info) + data_batch = dict(inputs=data_info['inputs'].unsqueeze(0), + data_samples=[data_info['data_samples']]) + with torch.no_grad(): + output = model.test_step(data_batch)[0] + pred_instances = output.pred_instances + # score thresholding + pred_instances = pred_instances[pred_instances.scores.float() > score_thr] + # max detections + if len(pred_instances.scores) > max_dets: + indices = pred_instances.scores.float().topk(max_dets)[1] + pred_instances = pred_instances[indices] + + pred_instances = pred_instances.cpu().numpy() + boxes = pred_instances['bboxes'] + labels = pred_instances['labels'] + scores = pred_instances['scores'] + label_texts = [texts[x][0] for x in labels] + return boxes, labels, label_texts, scores + + +if __name__ == "__main__": + + config_file = "configs/pretrain/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_1280ft_lvis_minival.py" + checkpoint = "weights/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth" + + cfg = Config.fromfile(config_file) + cfg.work_dir = osp.join('./work_dirs') + # init model + cfg.load_from = checkpoint + model = init_detector(cfg, checkpoint=checkpoint, device='cuda:0') + test_pipeline_cfg = get_test_pipeline_cfg(cfg=cfg) + test_pipeline_cfg[0].type = 'mmdet.LoadImageFromNDArray' + test_pipeline = Compose(test_pipeline_cfg) + + texts = [['person'], ['bus'], [' ']] + image = "demo/sample_images/bus.jpg" + print(f"starting to detect: {image}") + results = inference(model, image, texts, test_pipeline) + format_str = [ + f"obj-{idx}: {box}, label-{lbl}, class-{lbl_text}, score-{score}" + for idx, (box, lbl, lbl_text, score) in enumerate(zip(*results)) + ] + print("detecting results:") + for q in format_str: + print(q) diff --git a/models/YOLO-World/demo/video_demo.py b/models/YOLO-World/demo/video_demo.py new file mode 100644 
index 0000000000000000000000000000000000000000..9e6b13ec915429cec699868ffa189d1d9647195b --- /dev/null +++ b/models/YOLO-World/demo/video_demo.py @@ -0,0 +1,108 @@ +# Copyright (c) Tencent Inc. All rights reserved. +# This file is modifef from mmyolo/demo/video_demo.py +import argparse + +import cv2 +import mmcv +import torch +from mmengine.dataset import Compose +from mmdet.apis import init_detector +from mmengine.utils import track_iter_progress + +from mmyolo.registry import VISUALIZERS + + +def parse_args(): + parser = argparse.ArgumentParser(description='YOLO-World video demo') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('video', help='video file path') + parser.add_argument( + 'text', + help= + 'text prompts, including categories separated by a comma or a txt file with each line as a prompt.' + ) + parser.add_argument('--device', + default='cuda:0', + help='device used for inference') + parser.add_argument('--score-thr', + default=0.1, + type=float, + help='confidence score threshold for predictions.') + parser.add_argument('--out', type=str, help='output video file') + args = parser.parse_args() + return args + + +def inference_detector(model, image, texts, test_pipeline, score_thr=0.3): + data_info = dict(img_id=0, img=image, texts=texts) + data_info = test_pipeline(data_info) + data_batch = dict(inputs=data_info['inputs'].unsqueeze(0), + data_samples=[data_info['data_samples']]) + + with torch.no_grad(): + output = model.test_step(data_batch)[0] + pred_instances = output.pred_instances + pred_instances = pred_instances[pred_instances.scores.float() > + score_thr] + output.pred_instances = pred_instances + return output + + +def main(): + args = parse_args() + + model = init_detector(args.config, args.checkpoint, device=args.device) + + # build test pipeline + model.cfg.test_dataloader.dataset.pipeline[ + 0].type = 'mmdet.LoadImageFromNDArray' + test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + if args.text.endswith('.txt'): + with open(args.text) as f: + lines = f.readlines() + texts = [[t.rstrip('\r\n')] for t in lines] + [[' ']] + else: + texts = [[t.strip()] for t in args.text.split(',')] + [[' ']] + + # reparameterize texts + model.reparameterize(texts) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + # the dataset_meta is loaded from the checkpoint and + # then pass to the model in init_detector + visualizer.dataset_meta = model.dataset_meta + + video_reader = mmcv.VideoReader(args.video) + video_writer = None + if args.out: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + video_writer = cv2.VideoWriter( + args.out, fourcc, video_reader.fps, + (video_reader.width, video_reader.height)) + + for frame in track_iter_progress(video_reader): + result = inference_detector(model, + frame, + texts, + test_pipeline, + score_thr=args.score_thr) + visualizer.add_datasample(name='video', + image=frame, + data_sample=result, + draw_gt=False, + show=False, + pred_score_thr=args.score_thr) + frame = visualizer.get_image() + + if args.out: + video_writer.write(frame) + + if video_writer: + video_writer.release() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/deploy/__init__.py b/models/YOLO-World/deploy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/YOLO-World/deploy/easydeploy/README.md b/models/YOLO-World/deploy/easydeploy/README.md new 
file mode 100644 index 0000000000000000000000000000000000000000..1816e7ed96ee34209c56af4a22eda5f1eb7e499b --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/README.md @@ -0,0 +1,11 @@ +# MMYOLO Model Easy-Deployment + +## Introduction + +This project is developed for easily converting your MMYOLO models to other inference backends without the need of MMDeploy, which reduces the cost of both time and effort on getting familiar with MMDeploy. + +Currently we support converting to `ONNX` and `TensorRT` formats, other inference backends such `ncnn` will be added to this project as well. + +## Supported Backends + +- [Model Convert](docs/model_convert.md) diff --git a/models/YOLO-World/deploy/easydeploy/README_zh-CN.md b/models/YOLO-World/deploy/easydeploy/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..4c6bc0cf4ef91edeced04bdf15af08ae1f6f0dcd --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/README_zh-CN.md @@ -0,0 +1,11 @@ +# MMYOLO 模型转换 + +## 介绍 + +本项目作为 MMYOLO 的部署 project 单独存在,意图剥离 MMDeploy 当前的体系,独自支持用户完成模型训练后的转换和部署功能,使用户的学习和工程成本下降。 + +当前支持对 ONNX 格式和 TensorRT 格式的转换,后续对其他推理平台也会支持起来。 + +## 转换教程 + +- [Model Convert](docs/model_convert.md) diff --git a/models/YOLO-World/deploy/easydeploy/backbone/__init__.py b/models/YOLO-World/deploy/easydeploy/backbone/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dc167f8515c66a30d884ed9655a11d45e21481c0 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/backbone/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .common import DeployC2f +from .focus import DeployFocus, GConvFocus, NcnnFocus + +__all__ = ['DeployFocus', 'NcnnFocus', 'GConvFocus', 'DeployC2f'] diff --git a/models/YOLO-World/deploy/easydeploy/backbone/common.py b/models/YOLO-World/deploy/easydeploy/backbone/common.py new file mode 100644 index 0000000000000000000000000000000000000000..617875bd979a5b9150e476544090777118087a0b --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/backbone/common.py @@ -0,0 +1,16 @@ +import torch +import torch.nn as nn +from torch import Tensor + + +class DeployC2f(nn.Module): + + def __init__(self, *args, **kwargs): + super().__init__() + + def forward(self, x: Tensor) -> Tensor: + x_main = self.main_conv(x) + x_main = [x_main, x_main[:, self.mid_channels:, ...]] + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.pop(1) + return self.final_conv(torch.cat(x_main, 1)) diff --git a/models/YOLO-World/deploy/easydeploy/backbone/focus.py b/models/YOLO-World/deploy/easydeploy/backbone/focus.py new file mode 100644 index 0000000000000000000000000000000000000000..2a19afcca1d9c4e27109daeebd83907cd9b7b284 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/backbone/focus.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
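+# Deployment-oriented re-implementations of the Focus layer used by YOLOv5/YOLOX.
+# DeployFocus emulates the 2x2 space-to-depth slicing with reshape/permute ops,
+# NcnnFocus expresses the same rearrangement via channel shuffles that map onto
+# ncnn's ShuffleChannel, and GConvFocus replaces the slicing with four stride-2
+# grouped convolutions; all three copy the wrapped Focus module's attributes and
+# reuse its `conv` for the final projection.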
+import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + + +class DeployFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + batch_size, channel, height, width = x.shape + x = x.reshape(batch_size, channel, -1, 2, width) + x = x.reshape(batch_size, channel, x.shape[2], 2, -1, 2) + half_h = x.shape[2] + half_w = x.shape[4] + x = x.permute(0, 5, 3, 1, 2, 4) + x = x.reshape(batch_size, channel * 4, half_h, half_w) + + return self.conv(x) + + +class NcnnFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + batch_size, c, h, w = x.shape + assert h % 2 == 0 and w % 2 == 0, f'focus for yolox needs even feature\ + height and width, got {(h, w)}.' + + x = x.reshape(batch_size, c * h, 1, w) + _b, _c, _h, _w = x.shape + g = _c // 2 + # fuse to ncnn's shufflechannel + x = x.view(_b, g, 2, _h, _w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(_b, -1, _h, _w) + + x = x.reshape(_b, c * h * w, 1, 1) + + _b, _c, _h, _w = x.shape + g = _c // 2 + # fuse to ncnn's shufflechannel + x = x.view(_b, g, 2, _h, _w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(_b, -1, _h, _w) + + x = x.reshape(_b, c * 4, h // 2, w // 2) + + return self.conv(x) + + +class GConvFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + device = next(orin_Focus.parameters()).device + self.weight1 = torch.tensor([[1., 0], [0, 0]]).expand(3, 1, 2, + 2).to(device) + self.weight2 = torch.tensor([[0, 0], [1., 0]]).expand(3, 1, 2, + 2).to(device) + self.weight3 = torch.tensor([[0, 1.], [0, 0]]).expand(3, 1, 2, + 2).to(device) + self.weight4 = torch.tensor([[0, 0], [0, 1.]]).expand(3, 1, 2, + 2).to(device) + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + conv1 = F.conv2d(x, self.weight1, stride=2, groups=3) + conv2 = F.conv2d(x, self.weight2, stride=2, groups=3) + conv3 = F.conv2d(x, self.weight3, stride=2, groups=3) + conv4 = F.conv2d(x, self.weight4, stride=2, groups=3) + return self.conv(torch.cat([conv1, conv2, conv3, conv4], dim=1)) diff --git a/models/YOLO-World/deploy/easydeploy/bbox_code/__init__.py b/models/YOLO-World/deploy/easydeploy/bbox_code/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b85a815536a5749a15f0ad6aab2b028eb6a3fe0a --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/bbox_code/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox_coder import (rtmdet_bbox_decoder, yolov5_bbox_decoder, + yolox_bbox_decoder) + +__all__ = ['yolov5_bbox_decoder', 'rtmdet_bbox_decoder', 'yolox_bbox_decoder'] diff --git a/models/YOLO-World/deploy/easydeploy/bbox_code/bbox_coder.py b/models/YOLO-World/deploy/easydeploy/bbox_code/bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..6483cf8b0328aff3d61f1fa0788337ab536d347d --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/bbox_code/bbox_coder.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
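+# Box decoders used by the EasyDeploy export path: they turn raw head
+# predictions plus priors and strides into boxes inside the exported
+# ONNX/TensorRT graph. yolov5_bbox_decoder and yolox_bbox_decoder produce
+# (cx, cy, w, h) boxes, while rtmdet_bbox_decoder turns per-point distances
+# into (x1, y1, x2, y2) corners.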
+from typing import Optional + +import torch +from torch import Tensor + + +def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Tensor) -> Tensor: + bbox_preds = bbox_preds.sigmoid() + + x_center = (priors[..., 0] + priors[..., 2]) * 0.5 + y_center = (priors[..., 1] + priors[..., 3]) * 0.5 + w = priors[..., 2] - priors[..., 0] + h = priors[..., 3] - priors[..., 1] + + x_center_pred = (bbox_preds[..., 0] - 0.5) * 2 * stride + x_center + y_center_pred = (bbox_preds[..., 1] - 0.5) * 2 * stride + y_center + w_pred = (bbox_preds[..., 2] * 2)**2 * w + h_pred = (bbox_preds[..., 3] * 2)**2 * h + + decoded_bboxes = torch.stack( + [x_center_pred, y_center_pred, w_pred, h_pred], dim=-1) + + return decoded_bboxes + + +def rtmdet_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Optional[Tensor]) -> Tensor: + stride = stride[None, :, None] + bbox_preds *= stride + tl_x = (priors[..., 0] - bbox_preds[..., 0]) + tl_y = (priors[..., 1] - bbox_preds[..., 1]) + br_x = (priors[..., 0] + bbox_preds[..., 2]) + br_y = (priors[..., 1] + bbox_preds[..., 3]) + decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) + return decoded_bboxes + + +def yolox_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Optional[Tensor]) -> Tensor: + stride = stride[None, :, None] + xys = (bbox_preds[..., :2] * stride) + priors + whs = bbox_preds[..., 2:].exp() * stride + decoded_bboxes = torch.cat([xys, whs], -1) + return decoded_bboxes diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/CMakeLists.txt b/models/YOLO-World/deploy/easydeploy/deepstream/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f640bea13bacfc0f6cc2f33e598f65cf5ce0922e --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/CMakeLists.txt @@ -0,0 +1,35 @@ +cmake_minimum_required(VERSION 2.8.12) + +set(CMAKE_CUDA_ARCHITECTURES 60 61 62 70 72 75 86) +set(CMAKE_CUDA_COMPILER /usr/local/cuda/bin/nvcc) + +project(nvdsparsebbox_mmyolo LANGUAGES CXX) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -O3 -g -Wall -Werror -shared -fPIC") +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_BUILD_TYPE Release) +option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) + +# CUDA +find_package(CUDA REQUIRED) + +# TensorRT +set(TensorRT_INCLUDE_DIRS "/usr/include/x86_64-linux-gnu" CACHE STRING "TensorRT headers path") +set(TensorRT_LIBRARIES "/usr/lib/x86_64-linux-gnu" CACHE STRING "TensorRT libs path") + +# DeepStream +set(DEEPSTREAM "/opt/nvidia/deepstream/deepstream" CACHE STRING "DeepStream root path") +set(DS_LIBRARIES ${DEEPSTREAM}/lib) +set(DS_INCLUDE_DIRS ${DEEPSTREAM}/sources/includes) + +include_directories( + ${CUDA_INCLUDE_DIRS} + ${TensorRT_INCLUDE_DIRS} + ${DS_INCLUDE_DIRS}) + +add_library( + ${PROJECT_NAME} + SHARED + custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp) + +target_link_libraries(${PROJECT_NAME} PRIVATE nvinfer nvinfer_plugin) diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/README.md b/models/YOLO-World/deploy/easydeploy/deepstream/README.md new file mode 100644 index 0000000000000000000000000000000000000000..111f3765e41d558b64097d8a25585bd9c14acf4f --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/README.md @@ -0,0 +1,48 @@ +# Inference MMYOLO Models with DeepStream + +This project demonstrates how to inference MMYOLO models with customized parsers in [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk). + +## Pre-requisites + +### 1. 
Install Nvidia Driver and CUDA + +First, please follow the official documents and instructions to install dedicated Nvidia graphic driver and CUDA matched to your gpu and target Nvidia AIoT devices. + +### 2. Install DeepStream SDK + +Second, please follow the official instruction to download and install DeepStream SDK. Currently stable version of DeepStream is v6.2. + +### 3. Generate TensorRT Engine + +As DeepStream builds on top of several NVIDIA libraries, you need to first convert your trained MMYOLO models to TensorRT engine files. We strongly recommend you to try the supported TensorRT deployment solution in [EasyDeploy](../../easydeploy/). + +## Build and Run + +Please make sure that your converted TensorRT engine is already located in the `deepstream` folder as the config shows. Create your own model config files and change the `config-file` parameter in [deepstream_app_config.txt](deepstream_app_config.txt) to the model you want to run with. + +```bash +mkdir build && cd build +cmake .. +make -j$(nproc) && make install +``` + +Then you can run the inference with this command. + +```bash +deepstream-app -c deepstream_app_config.txt +``` + +## Code Structure + +```bash +├── deepstream +│ ├── configs # config file for MMYOLO models +│ │ └── config_infer_rtmdet.txt +│ ├── custom_mmyolo_bbox_parser # customized parser for MMYOLO models to DeepStream formats +│ │ └── nvdsparsebbox_mmyolo.cpp +| ├── CMakeLists.txt +│ ├── coco_labels.txt # labels for coco detection +│ ├── deepstream_app_config.txt # deepStream reference app configs for MMYOLO models +│ ├── README_zh-CN.md +│ └── README.md +``` diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/README_zh-CN.md b/models/YOLO-World/deploy/easydeploy/deepstream/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..13a85d5bc90159c3ff9f1a32e93d01e82ed2faa4 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/README_zh-CN.md @@ -0,0 +1,48 @@ +# 使用 DeepStream SDK 推理 MMYOLO 模型 + +本项目演示了如何使用 [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk) 配合改写的 parser 来推理 MMYOLO 的模型。 + +## 预先准备 + +### 1. 安装 Nidia 驱动和 CUDA + +首先请根据当前的显卡驱动和目标使用设备的驱动完成显卡驱动和 CUDA 的安装。 + +### 2. 安装 DeepStream SDK + +目前 DeepStream SDK 稳定版本已经更新到 v6.2,官方推荐使用这个版本。 + +### 3. 将 MMYOLO 模型转换为 TensorRT Engine + +推荐使用 EasyDeploy 中的 TensorRT 方案完成目标模型的转换部署,具体可参考 [此文档](../../easydeploy/docs/model_convert.md) 。 + +## 编译使用 + +当前项目使用的是 MMYOLO 的 rtmdet 模型,若想使用其他的模型,请参照目录下的配置文件进行改写。然后将转换完的 TensorRT engine 放在当前目录下并执行如下命令: + +```bash +mkdir build && cd build +cmake .. 
+make -j$(nproc) && make install +``` + +完成编译后可使用如下命令进行推理: + +```bash +deepstream-app -c deepstream_app_config.txt +``` + +## 项目代码结构 + +```bash +├── deepstream +│ ├── configs # MMYOLO 模型对应的 DeepStream 配置 +│ │ └── config_infer_rtmdet.txt +│ ├── custom_mmyolo_bbox_parser # 适配 DeepStream formats 的 parser +│ │ └── nvdsparsebbox_mmyolo.cpp +| ├── CMakeLists.txt +│ ├── coco_labels.txt # coco labels +│ ├── deepstream_app_config.txt # DeepStream app 配置 +│ ├── README_zh-CN.md +│ └── README.md +``` diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/coco_labels.txt b/models/YOLO-World/deploy/easydeploy/deepstream/coco_labels.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca76c80b5b2cd0b25047f75736656cfebc9da7aa --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/coco_labels.txt @@ -0,0 +1,80 @@ +person +bicycle +car +motorbike +aeroplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +sofa +pottedplant +bed +diningtable +toilet +tvmonitor +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_rtmdet.txt b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_rtmdet.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1e5efd2a3810730144e037ee96dfbd36124b0e6 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_rtmdet.txt @@ -0,0 +1,22 @@ +[property] +gpu-id=0 +net-scale-factor=0.01735207357279195 +offsets=57.375;57.12;58.395 +model-color-format=1 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov5.txt b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov5.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ad7d6429cacd0a6050821e5b2a41317478f5119 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov5.txt @@ -0,0 +1,21 @@ +[property] +gpu-id=0 +net-scale-factor=0.0039215697906911373 +model-color-format=0 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov8.txt b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov8.txt new file mode 100644 index 
0000000000000000000000000000000000000000..6ad7d6429cacd0a6050821e5b2a41317478f5119 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/configs/config_infer_yolov8.txt @@ -0,0 +1,21 @@ +[property] +gpu-id=0 +net-scale-factor=0.0039215697906911373 +model-color-format=0 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp b/models/YOLO-World/deploy/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb780856cbd2b289cdf9dc8518438f946a2ab548 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp @@ -0,0 +1,118 @@ +#include "nvdsinfer_custom_impl.h" +#include +#include + +/** + * Function expected by DeepStream for decoding the MMYOLO output. + * + * C-linkage [extern "C"] was written to prevent name-mangling. This function must return true after + * adding all bounding boxes to the objectList vector. + * + * @param [outputLayersInfo] std::vector of NvDsInferLayerInfo objects with information about the output layer. + * @param [networkInfo] NvDsInferNetworkInfo object with information about the MMYOLO network. + * @param [detectionParams] NvDsInferParseDetectionParams with information about some config params. + * @param [objectList] std::vector of NvDsInferParseObjectInfo objects to which bounding box information must + * be stored. + * + * @return true + */ + +// This is just the function prototype. The definition is written at the end of the file. +extern "C" bool NvDsInferParseCustomMMYOLO( + std::vector const& outputLayersInfo, + NvDsInferNetworkInfo const& networkInfo, + NvDsInferParseDetectionParams const& detectionParams, + std::vector& objectList); + +static __inline__ float clamp(float& val, float min, float max) +{ + return val > min ? (val < max ? val : max) : min; +} + +static std::vector decodeMMYoloTensor( + const int* num_dets, + const float* bboxes, + const float* scores, + const int* labels, + const float& conf_thres, + const unsigned int& img_w, + const unsigned int& img_h +) +{ + std::vector bboxInfo; + size_t nums = num_dets[0]; + for (size_t i = 0; i < nums; i++) + { + float score = scores[i]; + if (score < conf_thres)continue; + float x0 = (bboxes[i * 4]); + float y0 = (bboxes[i * 4 + 1]); + float x1 = (bboxes[i * 4 + 2]); + float y1 = (bboxes[i * 4 + 3]); + x0 = clamp(x0, 0.f, img_w); + y0 = clamp(y0, 0.f, img_h); + x1 = clamp(x1, 0.f, img_w); + y1 = clamp(y1, 0.f, img_h); + NvDsInferParseObjectInfo obj; + obj.left = x0; + obj.top = y0; + obj.width = x1 - x0; + obj.height = y1 - y0; + obj.detectionConfidence = score; + obj.classId = labels[i]; + bboxInfo.push_back(obj); + } + + return bboxInfo; +} + +/* C-linkage to prevent name-mangling */ +extern "C" bool NvDsInferParseCustomMMYOLO( + std::vector const& outputLayersInfo, + NvDsInferNetworkInfo const& networkInfo, + NvDsInferParseDetectionParams const& detectionParams, + std::vector& objectList) +{ + +// Some assertions and error checking. 
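+// The end-to-end engine exported via EasyDeploy is expected to expose exactly
+// four output layers, in this order: num_dets, boxes, scores and labels.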
+ if (outputLayersInfo.empty() || outputLayersInfo.size() != 4) + { + std::cerr << "Could not find output layer in bbox parsing" << std::endl; + return false; + } + +// Score threshold of bboxes. + const float conf_thres = detectionParams.perClassThreshold[0]; + +// Obtaining the output layer. + const NvDsInferLayerInfo& num_dets = outputLayersInfo[0]; + const NvDsInferLayerInfo& bboxes = outputLayersInfo[1]; + const NvDsInferLayerInfo& scores = outputLayersInfo[2]; + const NvDsInferLayerInfo& labels = outputLayersInfo[3]; + +// num_dets(int) bboxes(float) scores(float) labels(int) + assert (num_dets.dims.numDims == 2); + assert (bboxes.dims.numDims == 3); + assert (scores.dims.numDims == 2); + assert (labels.dims.numDims == 2); + + +// Decoding the output tensor of MMYOLO to the NvDsInferParseObjectInfo format. + std::vector objects = + decodeMMYoloTensor( + (const int*)(num_dets.buffer), + (const float*)(bboxes.buffer), + (const float*)(scores.buffer), + (const int*)(labels.buffer), + conf_thres, + networkInfo.width, + networkInfo.height + ); + + objectList.clear(); + objectList = objects; + return true; +} + +/* Check that the custom function has been defined correctly */ +CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomMMYOLO); diff --git a/models/YOLO-World/deploy/easydeploy/deepstream/deepstream_app_config.txt b/models/YOLO-World/deploy/easydeploy/deepstream/deepstream_app_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..331776897a5e9109b9007ed1b7974f128287c4fc --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/deepstream/deepstream_app_config.txt @@ -0,0 +1,62 @@ +[application] +enable-perf-measurement=1 +perf-measurement-interval-sec=5 + +[tiled-display] +enable=1 +rows=1 +columns=1 +width=1280 +height=720 +gpu-id=0 +nvbuf-memory-type=0 + +[source0] +enable=1 +type=3 +uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 +num-sources=1 +gpu-id=0 +cudadec-memtype=0 + +[sink0] +enable=1 +type=2 +sync=0 +gpu-id=0 +nvbuf-memory-type=0 + +[osd] +enable=1 +gpu-id=0 +border-width=5 +text-size=15 +text-color=1;1;1;1; +text-bg-color=0.3;0.3;0.3;1 +font=Serif +show-clock=0 +clock-x-offset=800 +clock-y-offset=820 +clock-text-size=12 +clock-color=1;0;0;0 +nvbuf-memory-type=0 + +[streammux] +gpu-id=0 +live-source=0 +batch-size=1 +batched-push-timeout=40000 +width=1920 +height=1080 +enable-padding=0 +nvbuf-memory-type=0 + +[primary-gie] +enable=1 +gpu-id=0 +gie-unique-id=1 +nvbuf-memory-type=0 +config-file=configs/config_infer_rtmdet.txt + +[tests] +file-loop=0 diff --git a/models/YOLO-World/deploy/easydeploy/docs/model_convert.md b/models/YOLO-World/deploy/easydeploy/docs/model_convert.md new file mode 100644 index 0000000000000000000000000000000000000000..9af62599dd1b56648680fc315ca88c35c7b31cb9 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/docs/model_convert.md @@ -0,0 +1,156 @@ +# MMYOLO 模型 ONNX 转换 + +## 1. 
导出后端支持的 ONNX + +## 环境依赖 + +- [onnx](https://github.com/onnx/onnx) + + ```shell + pip install onnx + ``` + + [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) (可选,用于简化模型) + + ```shell + pip install onnx-simplifier + ``` + +\*\*\* 请确保您在 `MMYOLO` 根目录下运行相关脚本,避免无法找到相关依赖包。\*\*\* + +## 使用方法 + +[模型导出脚本](./projects/easydeploy/tools/export_onnx.py)用于将 `MMYOLO` 模型转换为 `onnx` 。 + +### 参数介绍: + +- `config` : 构建模型使用的配置文件,如 [`yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py`](./configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py) 。 +- `checkpoint` : 训练得到的权重文件,如 `yolov5s.pth` 。 +- `--work-dir` : 转换后的模型保存路径。 +- `--img-size`: 转换模型时输入的尺寸,如 `640 640`。 +- `--batch-size`: 转换后的模型输入 `batch size` 。 +- `--device`: 转换模型使用的设备,默认为 `cuda:0`。 +- `--simplify`: 是否简化导出的 `onnx` 模型,需要安装 [onnx-simplifier](https://github.com/daquexian/onnx-simplifier),默认关闭。 +- `--opset`: 指定导出 `onnx` 的 `opset`,默认为 `11` 。 +- `--backend`: 指定导出 `onnx` 用于的后端名称,`ONNXRuntime`: `onnxruntime`, `TensorRT8`: `tensorrt8`, `TensorRT7`: `tensorrt7`,默认为`onnxruntime`即 `ONNXRuntime`。 +- `--pre-topk`: 指定导出 `onnx` 的后处理筛选候选框个数阈值,默认为 `1000`。 +- `--keep-topk`: 指定导出 `onnx` 的非极大值抑制输出的候选框个数阈值,默认为 `100`。 +- `--iou-threshold`: 非极大值抑制中过滤重复候选框的 `iou` 阈值,默认为 `0.65`。 +- `--score-threshold`: 非极大值抑制中过滤候选框得分的阈值,默认为 `0.25`。 +- `--model-only`: 指定仅导出模型 backbone + neck, 不包含后处理,默认关闭。 + +例子: + +```shell +python ./projects/easydeploy/tools/export.py \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + yolov5s.pth \ + --work-dir work_dir \ + --img-size 640 640 \ + --batch 1 \ + --device cpu \ + --simplify \ + --opset 11 \ + --backend 1 \ + --pre-topk 1000 \ + --keep-topk 100 \ + --iou-threshold 0.65 \ + --score-threshold 0.25 +``` + +然后利用后端支持的工具如 `TensorRT` 读取 `onnx` 再次转换为后端支持的模型格式如 `.engine/.plan` 等。 + +`MMYOLO` 目前支持 `TensorRT8`, `TensorRT7`, `ONNXRuntime` 后端的端到端模型转换,目前仅支持静态 shape 模型的导出和转换,动态 batch 或动态长宽的模型端到端转换会在未来继续支持。 + +端到端转换得到的 `onnx` 模型输入输出如图: + +
+*[figure: inputs and outputs of the end-to-end exported ONNX model]*
+ +输入名: `images`, 尺寸 640x640 + +输出名: `num_dets`, 尺寸 1x1,表示检测目标数量。 + +输出名: `boxes`, 尺寸 1x100x4,表示检测框的坐标,格式为 `x1y1x2y1`。 + +输出名: `scores`, 尺寸 1x100,表示检测框的分数。 + +输出名: `labels`, 尺寸 1x100,表示检测框的类别 id。 + +可以利用 `num_dets` 中的个数对 `boxes`, `scores`, `labels` 进行截断,从 100 个检测结果中抽取前 `num_dets` 个目标作为最终检测结果。 + +## 2. 仅导出模型 Backbone + Neck + +当您需要部署在非 `TensorRT`, `ONNXRuntime` 等支持端到端部署的平台时,您可以考虑使用`--model-only` 参数并且不要传递 `--backend` 参数,您将会导出仅包含 `Backbone` + `neck` 的模型,模型的部分输出如图: + +
+*[figure: outputs of the ONNX model exported with backbone + neck only]*
+ +这种导出方式获取的 `ONNX` 模型具有如下优点: + +- 算子简单,一般而言只包含 `Conv`,激活函数等简单算子,几乎不存在无法正确导出的情况,对于嵌入式部署更加友好。 +- 方便不同算法之间对比速度性能,由于不同的算法后处理不同,仅对比 `backbone` + `Neck` 的速度更加公平。 + +也有如下缺点: + +- 后处理逻辑需要单独完成,会有额外的 `decode` + `nms` 的操作需要实现。 +- 与 `TensorRT` 相比,由于 `TensorRT` 可以利用多核优势并行进行后处理,使用 `--model-only` 方式导出的模型性能会差很多。 + +### 使用方法 + +```shell +python ./projects/easydeploy/tools/export.py \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + yolov5s.pth \ + --work-dir work_dir \ + --img-size 640 640 \ + --batch 1 \ + --device cpu \ + --simplify \ + --opset 11 \ + --model-only +``` + +## 使用 `model-only` 导出的 ONNX 进行推理 + +[模型推理脚本](./projects/easydeploy/examples/main_onnxruntime.py)用于推理导出的 `ONNX` 模型,需要安装基础依赖环境: + +[`onnxruntime`](https://github.com/microsoft/onnxruntime) 和 [`opencv-python`](https://github.com/opencv/opencv-python) + +```shell +pip install onnxruntime +pip install opencv-python==4.7.0.72 # 建议使用最新的 opencv +``` + +### 参数介绍: + +- `img` : 待检测的图片路径或图片文件夹路径。 +- `onnx` : 导出的 `model-only` ONNX 模型。 +- `--type` : 模型名称,目前支持 `yolov5`, `yolox`, `yolov6`, `ppyoloe`, `ppyoloep`, `yolov7`, `rtmdet`, `yolov8`。 +- `--img-size`: 转换模型时输入的尺寸,如 `640 640`。 +- `--out-dir`: 保存检测结果的路径 。 +- `--show`: 是否可视化检测结果。 +- `--score-thr`: 模型检测后处理的置信度分数 。 +- `--iou-thr`: 模型检测后处理的 IOU 分数 。 + +## 使用方法 + +```shell +cd ./projects/easydeploy/examples +python main_onnxruntime.py \ + "image_path_to_detect" \ + yolov5_s_model-only.onnx \ + --out-dir work_dir \ + --img-size 640 640 \ + --show \ + --score-thr 0.3 \ + --iou-thr 0.7 +``` + +*注意!!!* + +当您使用自定义数据集训练得到的模型时,请修改 [`config.py`](./projects/easydeploy/examples/config.py) 中 `CLASS_NAMES` 和 `CLASS_COLORS`,如果是 `yolov5` 或者 `yolov7` 基于 `anchor` 的模型请同时修改 `YOLOv5_ANCHORS` 和 `YOLOv7_ANCHORS`。 + +[`numpy_coder.py`](./projects/easydeploy/examples/numpy_coder.py) 是目前所有算法仅使用 `numpy` 实现的 `decoder`,如果您对性能有较高的要求,可以参照相关代码改写为 `c/c++`。 diff --git a/models/YOLO-World/deploy/easydeploy/examples/config.py b/models/YOLO-World/deploy/easydeploy/examples/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4a85ff34273c22a356c9d6a3eaeb048b637b5f40 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/config.py @@ -0,0 +1,64 @@ +from enum import Enum + + +class TASK_TYPE(Enum): + DET = 'det' + SEG = 'seg' + POSE = 'pose' + + +class ModelType(Enum): + YOLOV5 = 'yolov5' + YOLOX = 'yolox' + PPYOLOE = 'ppyoloe' + PPYOLOEP = 'ppyoloep' + YOLOV6 = 'yolov6' + YOLOV7 = 'yolov7' + RTMDET = 'rtmdet' + YOLOV8 = 'yolov8' + + +CLASS_NAMES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', + 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') + +CLASS_COLORS = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), + (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), + (0, 0, 192), (250, 170, 30), (100, 
170, 30), (220, 220, 0), + (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), + (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), + (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), + (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), + (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), + (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), + (134, 134, 103), (145, 148, 174), (255, 208, 186), + (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), + (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), + (166, 196, 102), (208, 195, 210), (255, 109, 65), + (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0), + (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161), + (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120), + (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133), + (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62), + (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45), + (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1), + (246, 0, 122), (191, 162, 208)] + +YOLOv5_ANCHORS = [[(10, 13), (16, 30), (33, 23)], + [(30, 61), (62, 45), (59, 119)], + [(116, 90), (156, 198), (373, 326)]] + +YOLOv7_ANCHORS = [[(12, 16), (19, 36), (40, 28)], + [(36, 75), (76, 55), (72, 146)], + [(142, 110), (192, 243), (459, 401)]] diff --git a/models/YOLO-World/deploy/easydeploy/examples/cv2_nms.py b/models/YOLO-World/deploy/easydeploy/examples/cv2_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..79e376356b75339c796aeeb280cd8cdb52db8518 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/cv2_nms.py @@ -0,0 +1,36 @@ +from typing import List, Tuple, Union + +import cv2 +from numpy import ndarray + +MAJOR, MINOR = map(int, cv2.__version__.split('.')[:2]) +assert MAJOR == 4 + + +def non_max_suppression(boxes: Union[List[ndarray], Tuple[ndarray]], + scores: Union[List[float], Tuple[float]], + labels: Union[List[int], Tuple[int]], + conf_thres: float = 0.25, + iou_thres: float = 0.65) -> Tuple[List, List, List]: + if MINOR >= 7: + indices = cv2.dnn.NMSBoxesBatched(boxes, scores, labels, conf_thres, + iou_thres) + elif MINOR == 6: + indices = cv2.dnn.NMSBoxes(boxes, scores, conf_thres, iou_thres) + else: + indices = cv2.dnn.NMSBoxes(boxes, scores, conf_thres, + iou_thres).flatten() + + nmsd_boxes = [] + nmsd_scores = [] + nmsd_labels = [] + for idx in indices: + box = boxes[idx] + # x0y0wh -> x0y0x1y1 + box[2:] = box[:2] + box[2:] + score = scores[idx] + label = labels[idx] + nmsd_boxes.append(box) + nmsd_scores.append(score) + nmsd_labels.append(label) + return nmsd_boxes, nmsd_scores, nmsd_labels diff --git a/models/YOLO-World/deploy/easydeploy/examples/main_onnxruntime.py b/models/YOLO-World/deploy/easydeploy/examples/main_onnxruntime.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0ad1b0f10ed6cbea8c8b3c0c5010ec7a760cb5 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/main_onnxruntime.py @@ -0,0 +1,110 @@ +import math +import sys +from argparse import ArgumentParser +from pathlib import Path + +import cv2 +import onnxruntime +from config import (CLASS_COLORS, CLASS_NAMES, ModelType, YOLOv5_ANCHORS, + YOLOv7_ANCHORS) +from cv2_nms import non_max_suppression +from numpy_coder import Decoder +from preprocess import Preprocess +from tqdm import tqdm + +# Add __FILE__ to sys.path +sys.path.append(str(Path(__file__).resolve().parents[0])) + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', 
'.pgm', '.tif', + '.tiff', '.webp') + + +def path_to_list(path: str): + path = Path(path) + if path.is_file() and path.suffix in IMG_EXTENSIONS: + res_list = [str(path.absolute())] + elif path.is_dir(): + res_list = [ + str(p.absolute()) for p in path.iterdir() + if p.suffix in IMG_EXTENSIONS + ] + else: + raise RuntimeError + return res_list + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('onnx', type=str, help='Onnx file') + parser.add_argument('--type', type=str, help='Model type') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument( + '--out-dir', default='./output', type=str, help='Path to output file') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--iou-thr', type=float, default=0.7, help='Bbox iou threshold') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + out_dir = Path(args.out_dir) + model_type = ModelType(args.type.lower()) + + if not args.show: + out_dir.mkdir(parents=True, exist_ok=True) + + files = path_to_list(args.img) + session = onnxruntime.InferenceSession( + args.onnx, providers=['CPUExecutionProvider']) + preprocessor = Preprocess(model_type) + decoder = Decoder(model_type, model_only=True) + if model_type == ModelType.YOLOV5: + anchors = YOLOv5_ANCHORS + elif model_type == ModelType.YOLOV7: + anchors = YOLOv7_ANCHORS + else: + anchors = None + + for file in tqdm(files): + image = cv2.imread(file) + image_h, image_w = image.shape[:2] + img, (ratio_w, ratio_h) = preprocessor(image, args.img_size) + features = session.run(None, {'images': img}) + decoder_outputs = decoder( + features, + args.score_thr, + num_labels=len(CLASS_NAMES), + anchors=anchors) + nmsd_boxes, nmsd_scores, nmsd_labels = non_max_suppression( + *decoder_outputs, args.score_thr, args.iou_thr) + for box, score, label in zip(nmsd_boxes, nmsd_scores, nmsd_labels): + x0, y0, x1, y1 = box + x0 = math.floor(min(max(x0 / ratio_w, 1), image_w - 1)) + y0 = math.floor(min(max(y0 / ratio_h, 1), image_h - 1)) + x1 = math.ceil(min(max(x1 / ratio_w, 1), image_w - 1)) + y1 = math.ceil(min(max(y1 / ratio_h, 1), image_h - 1)) + cv2.rectangle(image, (x0, y0), (x1, y1), CLASS_COLORS[label], 2) + cv2.putText(image, f'{CLASS_NAMES[label]}: {score:.2f}', + (x0, y0 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, + (0, 255, 255), 2) + if args.show: + cv2.imshow('result', image) + cv2.waitKey(0) + else: + cv2.imwrite(f'{out_dir / Path(file).name}', image) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/deploy/easydeploy/examples/numpy_coder.py b/models/YOLO-World/deploy/easydeploy/examples/numpy_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..3011965597415b9b6b09fcfe950ea36702b51e57 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/numpy_coder.py @@ -0,0 +1,309 @@ +from typing import List, Tuple, Union + +import numpy as np +from config import ModelType +from numpy import ndarray + + +def softmax(x: ndarray, axis: int = -1) -> ndarray: + e_x = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = e_x / e_x.sum(axis=axis, keepdims=True) + return y + + +def sigmoid(x: ndarray) -> ndarray: + return 1. / (1. 
+ np.exp(-x)) + + +class Decoder: + + def __init__(self, model_type: ModelType, model_only: bool = False): + self.model_type = model_type + self.model_only = model_only + self.boxes_pro = [] + self.scores_pro = [] + self.labels_pro = [] + self.is_logging = False + + def __call__(self, + feats: Union[List, Tuple], + conf_thres: float, + num_labels: int = 80, + **kwargs) -> Tuple: + if not self.is_logging: + print('Only support decode in batch==1') + self.is_logging = True + self.boxes_pro.clear() + self.scores_pro.clear() + self.labels_pro.clear() + + if self.model_only: + # transpose channel to last dim for easy decoding + feats = [ + np.ascontiguousarray(feat[0].transpose(1, 2, 0)) + for feat in feats + ] + else: + # ax620a horizonX3 transpose channel to last dim by default + feats = [np.ascontiguousarray(feat) for feat in feats] + if self.model_type == ModelType.YOLOV5: + self.__yolov5_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOX: + self.__yolox_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type in (ModelType.PPYOLOE, ModelType.PPYOLOEP): + self.__ppyoloe_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV6: + self.__yolov6_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV7: + self.__yolov7_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.RTMDET: + self.__rtmdet_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV8: + self.__yolov8_decode(feats, conf_thres, num_labels, **kwargs) + else: + raise NotImplementedError + return self.boxes_pro, self.scores_pro, self.labels_pro + + def __yolov5_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + anchors: Union[List, Tuple] = kwargs.get( + 'anchors', + [[(10, 13), (16, 30), + (33, 23)], [(30, 61), (62, 45), + (59, 119)], [(116, 90), (156, 198), (373, 326)]]) + for i, feat in enumerate(feats): + stride = 8 << i + feat_h, feat_w, _ = feat.shape + anchor = anchors[i] + feat = sigmoid(feat) + feat = feat.reshape((feat_h, feat_w, len(anchor), -1)) + box_feat, conf_feat, score_feat = np.split(feat, [4, 5], -1) + + hIdx, wIdx, aIdx, _ = np.where(conf_feat > conf_thres) + + num_proposal = hIdx.size + if not num_proposal: + continue + + score_feat = score_feat[hIdx, wIdx, aIdx] * conf_feat[hIdx, wIdx, + aIdx] + boxes = box_feat[hIdx, wIdx, aIdx] + labels = score_feat.argmax(-1) + scores = score_feat.max(-1) + + indices = np.where(scores > conf_thres)[0] + if len(indices) == 0: + continue + + for idx in indices: + a_w, a_h = anchor[aIdx[idx]] + x, y, w, h = boxes[idx] + x = (x * 2.0 - 0.5 + wIdx[idx]) * stride + y = (y * 2.0 - 0.5 + hIdx[idx]) * stride + w = (w * 2.0)**2 * a_w + h = (h * 2.0)**2 * a_h + + x0 = x - w / 2 + y0 = y - h / 2 + + self.scores_pro.append(float(scores[idx])) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(labels[idx])) + + def __yolox_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat, conf_feat = np.split( + feat, [num_labels, num_labels + 4], -1) + conf_feat = sigmoid(conf_feat) + + hIdx, wIdx, _ = np.where(conf_feat > conf_thres) + + num_proposal = hIdx.size + if not num_proposal: + continue + + score_feat = sigmoid(score_feat[hIdx, wIdx]) * conf_feat[hIdx, + wIdx] + boxes = box_feat[hIdx, wIdx] + labels 
= score_feat.argmax(-1) + scores = score_feat.max(-1) + indices = np.where(scores > conf_thres)[0] + + if len(indices) == 0: + continue + + for idx in indices: + score = scores[idx] + label = labels[idx] + + x, y, w, h = boxes[idx] + + x = (x + wIdx[idx]) * stride + y = (y + hIdx[idx]) * stride + w = np.exp(w) * stride + h = np.exp(h) * stride + + x0 = x - w / 2 + y0 = y - h / 2 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __ppyoloe_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + reg_max: int = kwargs.get('reg_max', 17) + dfl = np.arange(0, reg_max, dtype=np.float32) + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx].reshape(num_proposal, 4, reg_max) + boxes = softmax(boxes, -1) @ dfl + labels = _argmax[hIdx, wIdx] + + for k in range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] + 0.5 - x0) * stride + y0 = (hIdx[k] + 0.5 - y0) * stride + x1 = (wIdx[k] + 0.5 + x1) * stride + y1 = (hIdx[k] + 0.5 + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov6_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx] + labels = _argmax[hIdx, wIdx] + + for k in range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] + 0.5 - x0) * stride + y0 = (hIdx[k] + 0.5 - y0) * stride + x1 = (wIdx[k] + 0.5 + x1) * stride + y1 = (hIdx[k] + 0.5 + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov7_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + anchors: Union[List, Tuple] = kwargs.get( + 'anchors', + [[(12, 16), (19, 36), + (40, 28)], [(36, 75), (76, 55), + (72, 146)], [(142, 110), (192, 243), (459, 401)]]) + self.__yolov5_decode(feats, conf_thres, num_labels, anchors=anchors) + + def __rtmdet_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx] + labels = _argmax[hIdx, wIdx] + + for k in 
range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] - x0) * stride + y0 = (hIdx[k] - y0) * stride + x1 = (wIdx[k] + x1) * stride + y1 = (hIdx[k] + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov8_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + self.__yolov6_decode(feats, conf_thres, num_labels) diff --git a/models/YOLO-World/deploy/easydeploy/examples/preprocess.py b/models/YOLO-World/deploy/easydeploy/examples/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6fb563a16a7f40ef556b5a23f635ab4627fc4f --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/preprocess.py @@ -0,0 +1,57 @@ +from typing import List, Tuple, Union + +import cv2 +import numpy as np +from config import ModelType +from numpy import ndarray + + +class Preprocess: + + def __init__(self, model_type: ModelType): + if model_type in (ModelType.YOLOV5, ModelType.YOLOV6, ModelType.YOLOV7, + ModelType.YOLOV8): + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([255, 255, 255], dtype=np.float32) + is_rgb = True + elif model_type == ModelType.YOLOX: + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([1, 1, 1], dtype=np.float32) + is_rgb = False + elif model_type == ModelType.PPYOLOE: + mean = np.array([123.675, 116.28, 103.53], dtype=np.float32) + std = np.array([58.395, 57.12, 57.375], dtype=np.float32) + is_rgb = True + + elif model_type == ModelType.PPYOLOEP: + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([255, 255, 255], dtype=np.float32) + is_rgb = True + elif model_type == ModelType.RTMDET: + mean = np.array([103.53, 116.28, 123.675], dtype=np.float32) + std = np.array([57.375, 57.12, 58.3955], dtype=np.float32) + is_rgb = False + else: + raise NotImplementedError + + self.mean = mean.reshape((3, 1, 1)) + self.std = std.reshape((3, 1, 1)) + self.is_rgb = is_rgb + + def __call__(self, + image: ndarray, + new_size: Union[List[int], Tuple[int]] = (640, 640), + **kwargs) -> Tuple[ndarray, Tuple[float, float]]: + # new_size: (height, width) + height, width = image.shape[:2] + ratio_h, ratio_w = new_size[0] / height, new_size[1] / width + image = cv2.resize( + image, (0, 0), + fx=ratio_w, + fy=ratio_h, + interpolation=cv2.INTER_LINEAR) + image = np.ascontiguousarray(image.transpose(2, 0, 1)) + image = image.astype(np.float32) + image -= self.mean + image /= self.std + return image[np.newaxis], (ratio_w, ratio_h) diff --git a/models/YOLO-World/deploy/easydeploy/examples/requirements.txt b/models/YOLO-World/deploy/easydeploy/examples/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b761189b52fc57e4231b37df0ff42bb44404c95 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/examples/requirements.txt @@ -0,0 +1,2 @@ +onnxruntime +opencv-python==4.7.0.72 diff --git a/models/YOLO-World/deploy/easydeploy/model/__init__.py b/models/YOLO-World/deploy/easydeploy/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38af8bc322b0a8e0c870fac243a0af9c1dba7315 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/model/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
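+# The classes re-exported here form the deploy pipeline: DeployModel wraps a
+# trained detector for ONNX export, while ORTWrapper / TRTWrapper run the
+# exported artifact. A rough usage sketch (file names are placeholders; see
+# export_onnx.py and tools/image-demo.py further below for the real entry points):
+#
+#   deploy_model = DeployModel(baseModel=model,
+#                              backend=MMYOLOBackend.ONNXRUNTIME,
+#                              postprocess_cfg=postprocess_cfg)
+#   torch.onnx.export(deploy_model, fake_input, 'model.onnx', ...)
+#   runtime = ORTWrapper('model.onnx', device='cuda:0')  # or TRTWrapper('model.engine', ...)
+#   num_dets, boxes, scores, labels = runtime(images)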
+from .backend import MMYOLOBackend +from .backendwrapper import ORTWrapper, TRTWrapper +from .model import DeployModel + +__all__ = ['DeployModel', 'TRTWrapper', 'ORTWrapper', 'MMYOLOBackend'] diff --git a/models/YOLO-World/deploy/easydeploy/model/backend.py b/models/YOLO-World/deploy/easydeploy/model/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..64d6e3f020bcfd3c3cf7db5f5611a8f815df4cb1 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/model/backend.py @@ -0,0 +1,23 @@ +from enum import Enum + +import torch +import torch.nn.functional as F + + +class MMYOLOBackend(Enum): + AX620A = 'ax620a' + COREML = 'coreml' + HORIZONX3 = 'horizonx3' + NCNN = 'ncnn' + ONNXRUNTIME = 'onnxruntime' + OPENVINO = 'openvino' + PPLNN = 'pplnn' + RKNN = 'rknn' + TENSORRT8 = 'tensorrt8' + TENSORRT7 = 'tensorrt7' + TORCHSCRIPT = 'torchscript' + TVM = 'tvm' + + +def HSigmoid__forward(self, x: torch.Tensor) -> torch.Tensor: + return F.hardsigmoid(x, inplace=True) diff --git a/models/YOLO-World/deploy/easydeploy/model/backendwrapper.py b/models/YOLO-World/deploy/easydeploy/model/backendwrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..2997d84ea98b3f30973cf2335ab0eb4af4edaef5 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/model/backendwrapper.py @@ -0,0 +1,202 @@ +import warnings +from collections import namedtuple +from functools import partial +from pathlib import Path +from typing import List, Optional, Union + +import numpy as np +import onnxruntime + +try: + import tensorrt as trt +except Exception: + trt = None +import torch + +warnings.filterwarnings(action='ignore', category=DeprecationWarning) + + +class TRTWrapper(torch.nn.Module): + dtype_mapping = {} + + def __init__(self, weight: Union[str, Path], + device: Optional[torch.device]): + super().__init__() + weight = Path(weight) if isinstance(weight, str) else weight + assert weight.exists() and weight.suffix in ('.engine', '.plan') + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + self.weight = weight + self.device = device + self.stream = torch.cuda.Stream(device=device) + self.__update_mapping() + self.__init_engine() + self.__init_bindings() + + def __update_mapping(self): + self.dtype_mapping.update({ + trt.bool: torch.bool, + trt.int8: torch.int8, + trt.int32: torch.int32, + trt.float16: torch.float16, + trt.float32: torch.float32 + }) + + def __init_engine(self): + logger = trt.Logger(trt.Logger.ERROR) + self.log = partial(logger.log, trt.Logger.ERROR) + trt.init_libnvinfer_plugins(logger, namespace='') + self.logger = logger + with trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(self.weight.read_bytes()) + + context = model.create_execution_context() + + names = [model.get_binding_name(i) for i in range(model.num_bindings)] + + num_inputs, num_outputs = 0, 0 + + for i in range(model.num_bindings): + if model.binding_is_input(i): + num_inputs += 1 + else: + num_outputs += 1 + + self.is_dynamic = -1 in model.get_binding_shape(0) + + self.model = model + self.context = context + self.input_names = names[:num_inputs] + self.output_names = names[num_inputs:] + self.num_inputs = num_inputs + self.num_outputs = num_outputs + self.num_bindings = num_inputs + num_outputs + self.bindings: List[int] = [0] * self.num_bindings + + def __init_bindings(self): + Binding = namedtuple('Binding', ('name', 'dtype', 'shape')) + inputs_info = [] + outputs_info = [] + + for i, name 
in enumerate(self.input_names): + assert self.model.get_binding_name(i) == name + dtype = self.dtype_mapping[self.model.get_binding_dtype(i)] + shape = tuple(self.model.get_binding_shape(i)) + inputs_info.append(Binding(name, dtype, shape)) + + for i, name in enumerate(self.output_names): + i += self.num_inputs + assert self.model.get_binding_name(i) == name + dtype = self.dtype_mapping[self.model.get_binding_dtype(i)] + shape = tuple(self.model.get_binding_shape(i)) + outputs_info.append(Binding(name, dtype, shape)) + self.inputs_info = inputs_info + self.outputs_info = outputs_info + if not self.is_dynamic: + self.output_tensor = [ + torch.empty(o.shape, dtype=o.dtype, device=self.device) + for o in outputs_info + ] + + def forward(self, *inputs): + + assert len(inputs) == self.num_inputs + + contiguous_inputs: List[torch.Tensor] = [ + i.contiguous() for i in inputs + ] + + for i in range(self.num_inputs): + self.bindings[i] = contiguous_inputs[i].data_ptr() + if self.is_dynamic: + self.context.set_binding_shape( + i, tuple(contiguous_inputs[i].shape)) + + # create output tensors + outputs: List[torch.Tensor] = [] + + for i in range(self.num_outputs): + j = i + self.num_inputs + if self.is_dynamic: + shape = tuple(self.context.get_binding_shape(j)) + output = torch.empty( + size=shape, + dtype=self.output_dtypes[i], + device=self.device) + + else: + output = self.output_tensor[i] + outputs.append(output) + self.bindings[j] = output.data_ptr() + + self.context.execute_async_v2(self.bindings, self.stream.cuda_stream) + self.stream.synchronize() + + return tuple(outputs) + + +class ORTWrapper(torch.nn.Module): + + def __init__(self, weight: Union[str, Path], + device: Optional[torch.device]): + super().__init__() + weight = Path(weight) if isinstance(weight, str) else weight + assert weight.exists() and weight.suffix == '.onnx' + + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + self.weight = weight + self.device = device + self.__init_session() + self.__init_bindings() + + def __init_session(self): + providers = ['CPUExecutionProvider'] + if 'cuda' in self.device.type: + providers.insert(0, 'CUDAExecutionProvider') + + session = onnxruntime.InferenceSession( + str(self.weight), providers=providers) + self.session = session + + def __init_bindings(self): + Binding = namedtuple('Binding', ('name', 'dtype', 'shape')) + inputs_info = [] + outputs_info = [] + self.is_dynamic = False + for i, tensor in enumerate(self.session.get_inputs()): + if any(not isinstance(i, int) for i in tensor.shape): + self.is_dynamic = True + inputs_info.append( + Binding(tensor.name, tensor.type, tuple(tensor.shape))) + + for i, tensor in enumerate(self.session.get_outputs()): + outputs_info.append( + Binding(tensor.name, tensor.type, tuple(tensor.shape))) + self.inputs_info = inputs_info + self.outputs_info = outputs_info + self.num_inputs = len(inputs_info) + + def forward(self, *inputs): + + assert len(inputs) == self.num_inputs + + contiguous_inputs: List[np.ndarray] = [ + i.contiguous().cpu().numpy() for i in inputs + ] + + if not self.is_dynamic: + # make sure input shape is right for static input shape + for i in range(self.num_inputs): + assert contiguous_inputs[i].shape == self.inputs_info[i].shape + + outputs = self.session.run([o.name for o in self.outputs_info], { + j.name: contiguous_inputs[i] + for i, j in enumerate(self.inputs_info) + }) + + return tuple(torch.from_numpy(o).to(self.device) for o in outputs) diff 
--git a/models/YOLO-World/deploy/easydeploy/model/model.py b/models/YOLO-World/deploy/easydeploy/model/model.py new file mode 100644 index 0000000000000000000000000000000000000000..21cf50f7df059ebc7d1974754d290883c06f6a0e --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/model/model.py @@ -0,0 +1,217 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from functools import partial +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from mmdet.models.backbones.csp_darknet import Focus +from mmdet.models.layers import ChannelAttention +from mmengine.config import ConfigDict +from torch import Tensor + +from mmyolo.models import RepVGGBlock +from mmyolo.models.dense_heads import (PPYOLOEHead, RTMDetHead, YOLOv5Head, + YOLOv7Head, YOLOv8Head, YOLOXHead) +from mmyolo.models.layers import ImplicitA, ImplicitM +from ..backbone import DeployFocus, GConvFocus, NcnnFocus +from ..bbox_code import (rtmdet_bbox_decoder, yolov5_bbox_decoder, + yolox_bbox_decoder) +from ..nms import batched_nms, efficient_nms, onnx_nms +from .backend import MMYOLOBackend + + +class DeployModel(nn.Module): + transpose = False + + def __init__(self, + baseModel: nn.Module, + backend: MMYOLOBackend, + postprocess_cfg: Optional[ConfigDict] = None, + with_nms=True, + without_bbox_decoder=False): + super().__init__() + self.baseModel = baseModel + self.baseHead = baseModel.bbox_head + self.backend = backend + self.with_nms = with_nms + self.without_bbox_decoder = without_bbox_decoder + if postprocess_cfg is None: + self.with_postprocess = False + else: + self.with_postprocess = True + self.__init_sub_attributes() + self.detector_type = type(self.baseHead) + self.pre_top_k = postprocess_cfg.get('pre_top_k', 1000) + self.keep_top_k = postprocess_cfg.get('keep_top_k', 100) + self.iou_threshold = postprocess_cfg.get('iou_threshold', 0.65) + self.score_threshold = postprocess_cfg.get('score_threshold', 0.25) + self.__switch_deploy() + + def __init_sub_attributes(self): + self.bbox_decoder = self.baseHead.bbox_coder.decode + self.prior_generate = self.baseHead.prior_generator.grid_priors + self.num_base_priors = self.baseHead.num_base_priors + self.featmap_strides = self.baseHead.featmap_strides + self.num_classes = self.baseHead.num_classes + + def __switch_deploy(self): + headType = type(self.baseHead) + if not self.with_postprocess: + if headType in (YOLOv5Head, YOLOv7Head): + self.baseHead.head_module.forward_single = self.forward_single + elif headType in (PPYOLOEHead, YOLOv8Head): + self.baseHead.head_module.reg_max = 0 + + if self.backend in (MMYOLOBackend.HORIZONX3, MMYOLOBackend.NCNN, + MMYOLOBackend.TORCHSCRIPT): + self.transpose = True + for layer in self.baseModel.modules(): + if isinstance(layer, RepVGGBlock): + layer.switch_to_deploy() + elif isinstance(layer, ChannelAttention): + layer.global_avgpool.forward = self.forward_gvp + elif isinstance(layer, Focus): + # onnxruntime openvino tensorrt8 tensorrt7 + if self.backend in (MMYOLOBackend.ONNXRUNTIME, + MMYOLOBackend.OPENVINO, + MMYOLOBackend.TENSORRT8, + MMYOLOBackend.TENSORRT7): + self.baseModel.backbone.stem = DeployFocus(layer) + # ncnn + elif self.backend == MMYOLOBackend.NCNN: + self.baseModel.backbone.stem = NcnnFocus(layer) + # switch focus to group conv + else: + self.baseModel.backbone.stem = GConvFocus(layer) + + def pred_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + coeff_preds: Optional[List[Tensor]] = None, + 
proto_preds: Optional[List[Tensor]] = None, + **kwargs): + assert len(cls_scores) == len(bbox_preds) + dtype = cls_scores[0].dtype + device = cls_scores[0].device + + nms_func = self.select_nms() + if self.detector_type in (YOLOv5Head, YOLOv7Head): + bbox_decoder = yolov5_bbox_decoder + elif self.detector_type is RTMDetHead: + bbox_decoder = rtmdet_bbox_decoder + elif self.detector_type is YOLOXHead: + bbox_decoder = yolox_bbox_decoder + else: + bbox_decoder = self.bbox_decoder + print(bbox_decoder) + + num_imgs = cls_scores[0].shape[0] + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + mlvl_priors = self.prior_generate(featmap_sizes, + dtype=dtype, + device=device) + + flatten_priors = torch.cat(mlvl_priors) + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size[0] * featmap_size[1] * self.num_base_priors, ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + text_len = cls_scores[0].shape[1] + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, text_len) + for cls_score in cls_scores + ] + cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + + if objectnesses is not None: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1)) + + scores = cls_scores + bboxes = flatten_bbox_preds + if self.without_bbox_decoder: + return scores, bboxes + bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds, + flatten_stride) + + if self.with_nms: + return nms_func(bboxes, scores, self.keep_top_k, + self.iou_threshold, self.score_threshold, + self.pre_top_k, self.keep_top_k) + else: + return scores, bboxes + + def select_nms(self): + if self.backend in (MMYOLOBackend.ONNXRUNTIME, MMYOLOBackend.OPENVINO): + nms_func = onnx_nms + elif self.backend == MMYOLOBackend.TENSORRT8: + nms_func = efficient_nms + elif self.backend == MMYOLOBackend.TENSORRT7: + nms_func = batched_nms + else: + raise NotImplementedError + if type(self.baseHead) in (YOLOv5Head, YOLOv7Head, YOLOXHead): + nms_func = partial(nms_func, box_coding=1) + + return nms_func + + def forward(self, inputs: Tensor): + neck_outputs = self.baseModel(inputs) + if self.with_postprocess: + return self.pred_by_feat(*neck_outputs) + else: + outputs = [] + if self.transpose: + for feats in zip(*neck_outputs): + if self.backend in (MMYOLOBackend.NCNN, + MMYOLOBackend.TORCHSCRIPT): + outputs.append( + torch.cat( + [feat.permute(0, 2, 3, 1) for feat in feats], + -1)) + else: + outputs.append(torch.cat(feats, 1).permute(0, 2, 3, 1)) + else: + for feats in zip(*neck_outputs): + outputs.append(torch.cat(feats, 1)) + return tuple(outputs) + + @staticmethod + def forward_single(x: Tensor, convs: nn.Module) -> Tuple[Tensor]: + if isinstance(convs, nn.Sequential) and any( + type(m) in (ImplicitA, ImplicitM) for m in convs): + a, c, m = convs + aw = a.implicit.clone() + mw = m.implicit.clone() + c = deepcopy(c) + nw, cw, _, _ = c.weight.shape + na, ca, _, _ = aw.shape + nm, cm, _, _ = mw.shape + c.bias = nn.Parameter(c.bias + ( + c.weight.reshape(nw, cw) @ aw.reshape(ca, na)).squeeze(1)) + c.bias = nn.Parameter(c.bias * mw.reshape(cm)) + c.weight = 
nn.Parameter(c.weight * mw.transpose(0, 1)) + convs = c + feat = convs(x) + return (feat, ) + + @staticmethod + def forward_gvp(x: Tensor) -> Tensor: + return torch.mean(x, [2, 3], keepdim=True) diff --git a/models/YOLO-World/deploy/easydeploy/nms/__init__.py b/models/YOLO-World/deploy/easydeploy/nms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59c5cdbd2b3b195125a14f473b825f616755fd6e --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/nms/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ort_nms import onnx_nms +from .trt_nms import batched_nms, efficient_nms + +__all__ = ['efficient_nms', 'batched_nms', 'onnx_nms'] diff --git a/models/YOLO-World/deploy/easydeploy/nms/ort_nms.py b/models/YOLO-World/deploy/easydeploy/nms/ort_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..597f3fb6f33c5bf182aa9c5ba4740e53168b005a --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/nms/ort_nms.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import Tensor +from torchvision.ops import batched_nms + +_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], + [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]], + dtype=torch.float32) + + +def sort_nms_index(nms_index, scores, batch_size, keep_top_k=-1): + """ + first sort the nms_index by batch, and then sort by score in every image result, final apply keep_top_k strategy. In the process, we can also get the number of detections for each image: num_dets + """ + # first sort by batch index to make sure that the same batch index is together + device = nms_index.device + nms_index_indices = torch.argsort(nms_index[:, 0], dim=0).to(device) + nms_index = nms_index[nms_index_indices] + + scores = scores[nms_index[:, 0], nms_index[:, 1], nms_index[:, 2]] + batch_inds = nms_index[:, 0] + + # Get the number of detections for each image + num_dets = torch.bincount(batch_inds,minlength=batch_size).to(device) + # Calculate the sum from front to back + cumulative_sum = torch.cumsum(num_dets, dim=0).to(device) + # add initial value 0 + cumulative_sum = torch.cat((torch.tensor([0]).to(device), cumulative_sum)) + for i in range(len(num_dets)): + start = cumulative_sum[i] + end = cumulative_sum[i + 1] + # sort by score in every batch + block_idx = torch.argsort(scores[start:end], descending=True).to(device) + nms_index[start:end] = nms_index[start:end][block_idx] + if keep_top_k > 0 and end - start > keep_top_k: + # delete lines from start+keep_top_k to end to keep only top k + nms_index = torch.cat( + (nms_index[: start + keep_top_k], nms_index[end:]), dim=0 + ) + num_dets[i] -= end - start - keep_top_k + cumulative_sum -= end - start - keep_top_k + return nms_index, num_dets + + +def select_nms_index( + scores: Tensor, + boxes: Tensor, + nms_index: Tensor, + batch_size: int, + keep_top_k: int = -1, +): + if nms_index.numel() == 0: + return torch.empty(0), torch.empty(0, 4), torch.empty(0), torch.empty(0) + nms_index, num_dets = sort_nms_index(nms_index, scores, batch_size, keep_top_k) + batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1] + box_inds = nms_index[:, 2] + + # according to the nms_index to get the scores,boxes and labels + batched_scores = scores[batch_inds, cls_inds, box_inds] + batched_dets = boxes[batch_inds, box_inds, ...] 
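+    # nms_index columns are (batch_idx, class_idx, box_idx): scores are gathered
+    # per (batch, class, box), boxes are shared across classes so only the
+    # (batch, box) indices are used, and the class column directly becomes the label.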
+    batched_labels = cls_inds
+
+    return num_dets, batched_dets, batched_scores, batched_labels
+
+
+def construct_indice(batch_idx, select_bbox_idxs, class_idxs, original_idxs):
+    num_bbox = len(select_bbox_idxs)
+    class_idxs = class_idxs[select_bbox_idxs]
+    indice = torch.zeros((num_bbox, 3), dtype=torch.int32).to(select_bbox_idxs.device)
+    # batch_idx
+    indice[:, 0] = batch_idx
+    # class_idxs
+    indice[:, 1] = class_idxs
+    # select_bbox_idxs
+    indice[:, 2] = original_idxs[select_bbox_idxs]
+    return indice
+
+
+def filter_max_boxes_per_class(
+    select_bbox_idxs, class_idxs, max_output_boxes_per_class
+):
+    class_counts = {}  # used to track the number of kept boxes per class
+
+    filtered_select_bbox_idxs = []
+    filtered_max_class_idxs = []
+
+    for bbox_idx, class_idx in zip(select_bbox_idxs, class_idxs):
+        class_count = class_counts.get(
+            class_idx.item(), 0
+        )  # count of the current class so far, or 0 if it has not been seen yet
+        if class_count < max_output_boxes_per_class:
+            filtered_select_bbox_idxs.append(bbox_idx)
+            filtered_max_class_idxs.append(class_idx)
+            class_counts[class_idx.item()] = class_count + 1
+    return torch.tensor(filtered_select_bbox_idxs), torch.tensor(
+        filtered_max_class_idxs
+    )
+
+
+class ONNXNMSop(torch.autograd.Function):
+
+    @staticmethod
+    def forward(
+            ctx,
+            boxes: Tensor,
+            scores: Tensor,
+            max_output_boxes_per_class: Tensor = torch.tensor([100]),
+            iou_threshold: Tensor = torch.tensor([0.5]),
+            score_threshold: Tensor = torch.tensor([0.05])
+    ) -> Tensor:
+        """
+        Non-Maximum Suppression (NMS) implementation.
+
+        Args:
+            boxes (Tensor): Bounding boxes of shape (batch_size, num_boxes, 4).
+            scores (Tensor): Confidence scores of shape (batch_size, num_classes, num_boxes).
+            max_output_boxes_per_class (Tensor): Maximum number of output boxes per class.
+            iou_threshold (Tensor): IoU threshold for NMS.
+            score_threshold (Tensor): Confidence score threshold.
+
+        Returns:
+            Tensor: Selected indices of shape (num_det, 3); each row is (batch index, class index, box index).
+        """
+        device = boxes.device
+        batch_size, num_classes, num_boxes = scores.shape
+        selected_indices = []
+        for batch_idx in range(batch_size):
+            boxes_per_image = boxes[batch_idx]
+            scores_per_image = scores[batch_idx]
+
+            # If there are no boxes in this image, continue to the next image
+            if boxes_per_image.numel() == 0:
+                continue
+
+            # Each box is assigned its single best class, so use torch.max to get the max score and class index per box
+            scores_per_image, class_idxs = torch.max(scores_per_image, dim=0)
+            # Apply the score threshold before batched_nms because the NMS operation is expensive
+            keep_idxs = scores_per_image > score_threshold
+            if not torch.any(keep_idxs):
+                # If no boxes are left after applying the score threshold, continue to the next image
+                continue
+
+            boxes_per_image = boxes_per_image[keep_idxs]
+            scores_per_image = scores_per_image[keep_idxs]
+            class_idxs = class_idxs[keep_idxs]
+
+            # original_idxs maps the kept boxes back to their indices in the original (unfiltered) input.
+ original_idxs = torch.arange(num_boxes, device=device)[keep_idxs] + # reference: https://pytorch.org/vision/main/generated/torchvision.ops.batched_nms.html + select_bbox_idxs = batched_nms( + boxes_per_image, scores_per_image, class_idxs, iou_threshold + ) + if ( + select_bbox_idxs.shape[0] > max_output_boxes_per_class + ): # If the boxes detected by all classes together are less than max_output_boxes_per_class, then there is no need to filter + select_bbox_idxs, _ = filter_max_boxes_per_class( + select_bbox_idxs, + class_idxs[select_bbox_idxs], + max_output_boxes_per_class, + ) + selected_indice = construct_indice( + batch_idx, select_bbox_idxs, class_idxs, original_idxs + ) + selected_indices.append(selected_indice) + if len(selected_indices) == 0: + return torch.tensor([], device=device) + selected_indices = torch.cat(selected_indices, dim=0) + return selected_indices + + @staticmethod + def symbolic( + g, + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: Tensor = torch.tensor([100]), + iou_threshold: Tensor = torch.tensor([0.5]), + score_threshold: Tensor = torch.tensor([0.05]), + ): + return g.op( + 'NonMaxSuppression', + boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + outputs=1) + + +def onnx_nms( + boxes: torch.Tensor, + scores: torch.Tensor, + max_output_boxes_per_class: int = 100, + iou_threshold: float = 0.5, + score_threshold: float = 0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + max_output_boxes_per_class = torch.tensor([max_output_boxes_per_class]) + iou_threshold = torch.tensor([iou_threshold]).to(boxes.device) + score_threshold = torch.tensor([score_threshold]).to(boxes.device) + + batch_size, _, _ = scores.shape + if box_coding == 1: + boxes = boxes @ (_XYWH2XYXY.to(boxes.device)) + scores = scores.transpose(1, 2).contiguous() + selected_indices = ONNXNMSop.apply(boxes, scores, + max_output_boxes_per_class, + iou_threshold, score_threshold) + + num_dets, batched_dets, batched_scores, batched_labels = select_nms_index( + scores, boxes, selected_indices, batch_size, keep_top_k=keep_top_k) + + return num_dets, batched_dets, batched_scores, batched_labels.to( + torch.int32) diff --git a/models/YOLO-World/deploy/easydeploy/nms/trt_nms.py b/models/YOLO-World/deploy/easydeploy/nms/trt_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..e0db1e2164d4366ff9ce4f74d39ded917c39ba79 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/nms/trt_nms.py @@ -0,0 +1,226 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
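+# The autograd Functions in this module are ONNX-exportable stand-ins for the
+# TensorRT NMS plugins: forward() only returns randomly filled tensors with the
+# shapes needed for tracing, while symbolic() emits 'TRT::EfficientNMS_TRT' /
+# 'TRT::BatchedNMSDynamic_TRT' nodes, so the real NMS runs inside the TensorRT
+# plugin at inference time. A minimal calling sketch (the threshold values mirror
+# the default postprocess_cfg in model.py, not the defaults of this function):
+#
+#   num_dets, det_boxes, det_scores, det_classes = efficient_nms(
+#       bboxes, scores, iou_threshold=0.65, score_threshold=0.25,
+#       pre_top_k=1000, keep_top_k=100)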
+import torch +from torch import Tensor + +_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], + [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]], + dtype=torch.float32) + + +class TRTEfficientNMSop(torch.autograd.Function): + + @staticmethod + def forward( + ctx, + boxes: Tensor, + scores: Tensor, + background_class: int = -1, + box_coding: int = 0, + iou_threshold: float = 0.45, + max_output_boxes: int = 100, + plugin_version: str = '1', + score_activation: int = 0, + score_threshold: float = 0.25, + ): + batch_size, _, num_classes = scores.shape + num_det = torch.randint( + 0, max_output_boxes, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, max_output_boxes, 4) + det_scores = torch.randn(batch_size, max_output_boxes) + det_classes = torch.randint( + 0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic(g, + boxes: Tensor, + scores: Tensor, + background_class: int = -1, + box_coding: int = 0, + iou_threshold: float = 0.45, + max_output_boxes: int = 100, + plugin_version: str = '1', + score_activation: int = 0, + score_threshold: float = 0.25): + out = g.op( + 'TRT::EfficientNMS_TRT', + boxes, + scores, + background_class_i=background_class, + box_coding_i=box_coding, + iou_threshold_f=iou_threshold, + max_output_boxes_i=max_output_boxes, + plugin_version_s=plugin_version, + score_activation_i=score_activation, + score_threshold_f=score_threshold, + outputs=4) + num_det, det_boxes, det_scores, det_classes = out + return num_det, det_boxes, det_scores, det_classes + + +class TRTbatchedNMSop(torch.autograd.Function): + """TensorRT NMS operation.""" + + @staticmethod + def forward( + ctx, + boxes: Tensor, + scores: Tensor, + plugin_version: str = '1', + shareLocation: int = 1, + backgroundLabelId: int = -1, + numClasses: int = 80, + topK: int = 1000, + keepTopK: int = 100, + scoreThreshold: float = 0.25, + iouThreshold: float = 0.45, + isNormalized: int = 0, + clipBoxes: int = 0, + scoreBits: int = 16, + caffeSemantics: int = 1, + ): + batch_size, _, numClasses = scores.shape + num_det = torch.randint( + 0, keepTopK, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, keepTopK, 4) + det_scores = torch.randn(batch_size, keepTopK) + det_classes = torch.randint(0, numClasses, + (batch_size, keepTopK)).float() + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic( + g, + boxes: Tensor, + scores: Tensor, + plugin_version: str = '1', + shareLocation: int = 1, + backgroundLabelId: int = -1, + numClasses: int = 80, + topK: int = 1000, + keepTopK: int = 100, + scoreThreshold: float = 0.25, + iouThreshold: float = 0.45, + isNormalized: int = 0, + clipBoxes: int = 0, + scoreBits: int = 16, + caffeSemantics: int = 1, + ): + out = g.op( + 'TRT::BatchedNMSDynamic_TRT', + boxes, + scores, + shareLocation_i=shareLocation, + plugin_version_s=plugin_version, + backgroundLabelId_i=backgroundLabelId, + numClasses_i=numClasses, + topK_i=topK, + keepTopK_i=keepTopK, + scoreThreshold_f=scoreThreshold, + iouThreshold_f=iouThreshold, + isNormalized_i=isNormalized, + clipBoxes_i=clipBoxes, + scoreBits_i=scoreBits, + caffeSemantics_i=caffeSemantics, + outputs=4) + num_det, det_boxes, det_scores, det_classes = out + return num_det, det_boxes, det_scores, det_classes + + +def _efficient_nms( + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: int = 1000, + iou_threshold: float = 0.5, + score_threshold: float = 
0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + """Wrapper for `efficient_nms` with TensorRT. + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5. + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (int): Number of top K boxes to keep before nms. + Defaults to -1. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + box_coding (int): Bounding boxes format for nms. + Defaults to 0 means [x1, y1 ,x2, y2]. + Set to 1 means [x, y, w, h]. + Returns: + tuple[Tensor, Tensor, Tensor, Tensor]: + (num_det, det_boxes, det_scores, det_classes), + `num_det` of shape [N, 1] + `det_boxes` of shape [N, num_det, 4] + `det_scores` of shape [N, num_det] + `det_classes` of shape [N, num_det] + """ + num_det, det_boxes, det_scores, det_classes = TRTEfficientNMSop.apply( + boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0, + score_threshold) + return num_det, det_boxes, det_scores, det_classes + + +def _batched_nms( + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: int = 1000, + iou_threshold: float = 0.5, + score_threshold: float = 0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + """Wrapper for `efficient_nms` with TensorRT. + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5. + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (int): Number of top K boxes to keep before nms. + Defaults to -1. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + box_coding (int): Bounding boxes format for nms. + Defaults to 0 means [x1, y1 ,x2, y2]. + Set to 1 means [x, y, w, h]. 
+ Returns: + tuple[Tensor, Tensor, Tensor, Tensor]: + (num_det, det_boxes, det_scores, det_classes), + `num_det` of shape [N, 1] + `det_boxes` of shape [N, num_det, 4] + `det_scores` of shape [N, num_det] + `det_classes` of shape [N, num_det] + """ + if box_coding == 1: + boxes = boxes @ (_XYWH2XYXY.to(boxes.device)) + boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2) + _, _, numClasses = scores.shape + + num_det, det_boxes, det_scores, det_classes = TRTbatchedNMSop.apply( + boxes, scores, '1', 1, -1, int(numClasses), min(pre_top_k, 4096), + keep_top_k, score_threshold, iou_threshold, 0, 0, 16, 1) + + det_classes = det_classes.int() + return num_det, det_boxes, det_scores, det_classes + + +def efficient_nms(*args, **kwargs): + """Wrapper function for `_efficient_nms`.""" + return _efficient_nms(*args, **kwargs) + + +def batched_nms(*args, **kwargs): + """Wrapper function for `_batched_nms`.""" + return _batched_nms(*args, **kwargs) diff --git a/models/YOLO-World/deploy/easydeploy/onnx_demo.py b/models/YOLO-World/deploy/easydeploy/onnx_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/YOLO-World/deploy/easydeploy/tools/build_engine.py b/models/YOLO-World/deploy/easydeploy/tools/build_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..b400c9db826878a7bb0fb13f4b1dea9b793583e7 --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/tools/build_engine.py @@ -0,0 +1,136 @@ +import argparse +from pathlib import Path +from typing import List, Optional, Tuple, Union + +try: + import tensorrt as trt +except Exception: + trt = None +import warnings + +import numpy as np +import torch + +warnings.filterwarnings(action='ignore', category=DeprecationWarning) + + +class EngineBuilder: + + def __init__( + self, + checkpoint: Union[str, Path], + opt_shape: Union[Tuple, List] = (1, 3, 640, 640), + device: Optional[Union[str, int, torch.device]] = None) -> None: + checkpoint = Path(checkpoint) if isinstance(checkpoint, + str) else checkpoint + assert checkpoint.exists() and checkpoint.suffix == '.onnx' + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + + self.checkpoint = checkpoint + self.opt_shape = np.array(opt_shape, dtype=np.float32) + self.device = device + + def __build_engine(self, + scale: Optional[List[List]] = None, + fp16: bool = True, + with_profiling: bool = True) -> None: + logger = trt.Logger(trt.Logger.WARNING) + trt.init_libnvinfer_plugins(logger, namespace='') + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = torch.cuda.get_device_properties( + self.device).total_memory + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(self.checkpoint)): + raise RuntimeError( + f'failed to load ONNX file: {str(self.checkpoint)}') + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + profile = None + dshape = -1 in network.get_input(0).shape + if dshape: + profile = builder.create_optimization_profile() + if scale is None: + scale = np.array( + [[1, 1, 0.5, 0.5], [1, 1, 1, 1], [4, 1, 1.5, 1.5]], + dtype=np.float32) + scale = (self.opt_shape * scale).astype(np.int32) + elif isinstance(scale, List): + scale = 
np.array(scale, dtype=np.int32) + assert scale.shape[0] == 3, 'Input a wrong scale list' + else: + raise NotImplementedError + + for inp in inputs: + logger.log( + trt.Logger.WARNING, + f'input "{inp.name}" with shape{inp.shape} {inp.dtype}') + if dshape: + profile.set_shape(inp.name, *scale) + for out in outputs: + logger.log( + trt.Logger.WARNING, + f'output "{out.name}" with shape{out.shape} {out.dtype}') + if fp16 and builder.platform_has_fast_fp16: + config.set_flag(trt.BuilderFlag.FP16) + self.weight = self.checkpoint.with_suffix('.engine') + if dshape: + config.add_optimization_profile(profile) + if with_profiling: + config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED + with builder.build_engine(network, config) as engine: + self.weight.write_bytes(engine.serialize()) + logger.log( + trt.Logger.WARNING, f'Build tensorrt engine finish.\n' + f'Save in {str(self.weight.absolute())}') + + def build(self, + scale: Optional[List[List]] = None, + fp16: bool = True, + with_profiling=True): + self.__build_engine(scale, fp16, with_profiling) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument( + '--device', type=str, default='cuda:0', help='TensorRT builder device') + parser.add_argument( + '--scales', + type=str, + default='[[1,3,640,640],[1,3,640,640],[1,3,640,640]]', + help='Input scales for build dynamic input shape engine') + parser.add_argument( + '--fp16', action='store_true', help='Build model with fp16 mode') + args = parser.parse_args() + args.img_size *= 2 if len(args.img_size) == 1 else 1 + return args + + +def main(args): + img_size = (1, 3, *args.img_size) + try: + scales = eval(args.scales) + except Exception: + print('Input scales is not a python variable') + print('Set scales default None') + scales = None + builder = EngineBuilder(args.checkpoint, img_size, args.device) + builder.build(scales, fp16=args.fp16) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/models/YOLO-World/deploy/easydeploy/tools/export_onnx.py b/models/YOLO-World/deploy/easydeploy/tools/export_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..b937cc8a72b5c09d61580ddb1297213693adaf1c --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/tools/export_onnx.py @@ -0,0 +1,157 @@ +import argparse +import os +import sys +import warnings +from io import BytesIO +from pathlib import Path + +import onnx +import torch +from mmdet.apis import init_detector +from mmengine.config import ConfigDict +from mmengine.logging import print_log +from mmengine.utils.path import mkdir_or_exist + +# Add MMYOLO ROOT to sys.path +sys.path.append(str(Path(__file__).resolve().parents[3])) +from projects.easydeploy.model import DeployModel, MMYOLOBackend # noqa E402 + +warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) +warnings.filterwarnings(action='ignore', category=torch.jit.ScriptWarning) +warnings.filterwarnings(action='ignore', category=UserWarning) +warnings.filterwarnings(action='ignore', category=FutureWarning) +warnings.filterwarnings(action='ignore', category=ResourceWarning) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--model-only', action='store_true', help='Export 
model only') + parser.add_argument( + '--work-dir', default='./work_dir', help='Path to save export model') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument('--batch-size', type=int, default=1, help='Batch size') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--simplify', + action='store_true', + help='Simplify onnx model by onnx-sim') + parser.add_argument( + '--opset', type=int, default=11, help='ONNX opset version') + parser.add_argument( + '--backend', + type=str, + default='onnxruntime', + help='Backend for export onnx') + parser.add_argument( + '--pre-topk', + type=int, + default=1000, + help='Postprocess pre topk bboxes feed into NMS') + parser.add_argument( + '--keep-topk', + type=int, + default=100, + help='Postprocess keep topk bboxes out of NMS') + parser.add_argument( + '--iou-threshold', + type=float, + default=0.65, + help='IoU threshold for NMS') + parser.add_argument( + '--score-threshold', + type=float, + default=0.25, + help='Score threshold for NMS') + args = parser.parse_args() + args.img_size *= 2 if len(args.img_size) == 1 else 1 + return args + + +def build_model_from_cfg(config_path, checkpoint_path, device): + model = init_detector(config_path, checkpoint_path, device=device) + model.eval() + return model + + +def main(): + args = parse_args() + mkdir_or_exist(args.work_dir) + backend = MMYOLOBackend(args.backend.lower()) + if backend in (MMYOLOBackend.ONNXRUNTIME, MMYOLOBackend.OPENVINO, + MMYOLOBackend.TENSORRT8, MMYOLOBackend.TENSORRT7): + if not args.model_only: + print_log('Export ONNX with bbox decoder and NMS ...') + else: + args.model_only = True + print_log(f'Can not export postprocess for {args.backend.lower()}.\n' + f'Set "args.model_only=True" default.') + if args.model_only: + postprocess_cfg = None + output_names = None + else: + postprocess_cfg = ConfigDict( + pre_top_k=args.pre_topk, + keep_top_k=args.keep_topk, + iou_threshold=args.iou_threshold, + score_threshold=args.score_threshold) + output_names = ['num_dets', 'boxes', 'scores', 'labels'] + baseModel = build_model_from_cfg(args.config, args.checkpoint, args.device) + + deploy_model = DeployModel( + baseModel=baseModel, backend=backend, postprocess_cfg=postprocess_cfg) + deploy_model.eval() + + fake_input = torch.randn(args.batch_size, 3, + *args.img_size).to(args.device) + # dry run + deploy_model(fake_input) + + save_onnx_path = os.path.join( + args.work_dir, + os.path.basename(args.checkpoint).replace('pth', 'onnx')) + # export onnx + with BytesIO() as f: + torch.onnx.export( + deploy_model, + fake_input, + f, + input_names=['images'], + output_names=output_names, + opset_version=args.opset) + f.seek(0) + onnx_model = onnx.load(f) + onnx.checker.check_model(onnx_model) + + # Fix tensorrt onnx output shape, just for view + if not args.model_only and backend in (MMYOLOBackend.TENSORRT8, + MMYOLOBackend.TENSORRT7): + shapes = [ + args.batch_size, 1, args.batch_size, args.keep_topk, 4, + args.batch_size, args.keep_topk, args.batch_size, + args.keep_topk + ] + for i in onnx_model.graph.output: + for j in i.type.tensor_type.shape.dim: + j.dim_param = str(shapes.pop(0)) + if args.simplify: + try: + import onnxsim + onnx_model, check = onnxsim.simplify(onnx_model) + assert check, 'assert check failed' + except Exception as e: + print_log(f'Simplify failure: {e}') + onnx.save(onnx_model, save_onnx_path) + print_log(f'ONNX export 
success, save into {save_onnx_path}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/deploy/easydeploy/tools/image-demo.py b/models/YOLO-World/deploy/easydeploy/tools/image-demo.py new file mode 100644 index 0000000000000000000000000000000000000000..12ebaddce60b30021fea6a2f512cb8248db45a8e --- /dev/null +++ b/models/YOLO-World/deploy/easydeploy/tools/image-demo.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from easydeploy.model import ORTWrapper, TRTWrapper # isort:skip +import os +import random +from argparse import ArgumentParser + +import cv2 +import mmcv +import numpy as np +import torch +from mmcv.transforms import Compose +from mmdet.utils import get_test_pipeline_cfg +from mmengine.config import Config, ConfigDict +from mmengine.utils import ProgressBar, path + +from mmyolo.utils import register_all_modules +from mmyolo.utils.misc import get_file_list + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + args = parser.parse_args() + return args + + +def preprocess(config): + data_preprocess = config.get('model', {}).get('data_preprocessor', {}) + mean = data_preprocess.get('mean', [0., 0., 0.]) + std = data_preprocess.get('std', [1., 1., 1.]) + mean = torch.tensor(mean, dtype=torch.float32).reshape(1, 3, 1, 1) + std = torch.tensor(std, dtype=torch.float32).reshape(1, 3, 1, 1) + + class PreProcess(torch.nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + x = x[None].float() + x -= mean.to(x.device) + x /= std.to(x.device) + return x + + return PreProcess().eval() + + +def main(): + args = parse_args() + + # register all modules in mmdet into the registries + register_all_modules() + + colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(1000)] + + # build the model from a config file and a checkpoint file + if args.checkpoint.endswith('.onnx'): + model = ORTWrapper(args.checkpoint, args.device) + elif args.checkpoint.endswith('.engine') or args.checkpoint.endswith( + '.plan'): + model = TRTWrapper(args.checkpoint, args.device) + else: + raise NotImplementedError + + model.to(args.device) + + cfg = Config.fromfile(args.config) + class_names = cfg.get('class_name') + + test_pipeline = get_test_pipeline_cfg(cfg) + test_pipeline[0] = ConfigDict({'type': 'mmdet.LoadImageFromNDArray'}) + test_pipeline = Compose(test_pipeline) + + pre_pipeline = preprocess(cfg) + + if not args.show: + path.mkdir_or_exist(args.out_dir) + + # get file list + files, source_type = get_file_list(args.img) + + # start detector inference + progress_bar = ProgressBar(len(files)) + for i, file in enumerate(files): + bgr = mmcv.imread(file) + rgb = mmcv.imconvert(bgr, 'bgr', 'rgb') + data, samples = test_pipeline(dict(img=rgb, img_id=i)).values() + pad_param = samples.get('pad_param', + np.array([0, 0, 0, 0], dtype=np.float32)) + h, w = samples.get('ori_shape', rgb.shape[:2]) + pad_param = torch.asarray( + [pad_param[2], pad_param[0], pad_param[2], pad_param[0]], + device=args.device) + scale_factor = samples.get('scale_factor', [1., 1]) + scale_factor = 
torch.asarray(scale_factor * 2, device=args.device) + data = pre_pipeline(data).to(args.device) + + result = model(data) + if source_type['is_dir']: + filename = os.path.relpath(file, args.img).replace('/', '_') + else: + filename = os.path.basename(file) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + # Get candidate predict info by num_dets + num_dets, bboxes, scores, labels = result + scores = scores[0, :num_dets] + bboxes = bboxes[0, :num_dets] + labels = labels[0, :num_dets] + bboxes -= pad_param + bboxes /= scale_factor + + bboxes[:, 0::2].clamp_(0, w) + bboxes[:, 1::2].clamp_(0, h) + bboxes = bboxes.round().int() + + for (bbox, score, label) in zip(bboxes, scores, labels): + bbox = bbox.tolist() + color = colors[label] + + if class_names is not None: + label_name = class_names[label] + name = f'cls:{label_name}_score:{score:0.4f}' + else: + name = f'cls:{label}_score:{score:0.4f}' + + cv2.rectangle(bgr, bbox[:2], bbox[2:], color, 2) + cv2.putText( + bgr, + name, (bbox[0], bbox[1] - 2), + cv2.FONT_HERSHEY_SIMPLEX, + 2.0, [225, 255, 255], + thickness=3) + + if args.show: + mmcv.imshow(bgr, 'result', 0) + else: + mmcv.imwrite(bgr, out_file) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/deploy/export_onnx.py b/models/YOLO-World/deploy/export_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..4041b12112ae96d5410177c51f08fcd28ad3bb48 --- /dev/null +++ b/models/YOLO-World/deploy/export_onnx.py @@ -0,0 +1,182 @@ +# # Copyright (c) OpenMMLab. All rights reserved. +import os +import json +import warnings +import argparse +from io import BytesIO + +import onnx +import torch +from mmdet.apis import init_detector +from mmengine.config import ConfigDict +from mmengine.logging import print_log +from mmengine.utils.path import mkdir_or_exist + +from easydeploy.model import DeployModel, MMYOLOBackend # noqa E402 + +warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) +warnings.filterwarnings(action='ignore', category=torch.jit.ScriptWarning) +warnings.filterwarnings(action='ignore', category=UserWarning) +warnings.filterwarnings(action='ignore', category=FutureWarning) +warnings.filterwarnings(action='ignore', category=ResourceWarning) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('--custom-text', + type=str, + help='custom text inputs (text json) for YOLO-World.') + parser.add_argument('--add-padding', + action="store_true", + help="add an empty padding to texts.") + parser.add_argument('--model-only', + action='store_true', + help='Export model only') + parser.add_argument('--without-nms', + action='store_true', + help='Export model without NMS') + parser.add_argument('--without-bbox-decoder', + action='store_true', + help='Export model without Bbox Decoder (for INT8 Quantization)') + parser.add_argument('--work-dir', + default='./work_dirs', + help='Path to save export model') + parser.add_argument('--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument('--batch-size', type=int, default=1, help='Batch size') + parser.add_argument('--device', + default='cuda:0', + help='Device used for inference') + parser.add_argument('--simplify', + action='store_true', + help='Simplify onnx model by onnx-sim') + parser.add_argument('--opset', + type=int, + default=11, + 
help='ONNX opset version') + parser.add_argument('--backend', + type=str, + default='onnxruntime', + help='Backend for export onnx') + parser.add_argument('--pre-topk', + type=int, + default=1000, + help='Postprocess pre topk bboxes feed into NMS') + parser.add_argument('--keep-topk', + type=int, + default=100, + help='Postprocess keep topk bboxes out of NMS') + parser.add_argument('--iou-threshold', + type=float, + default=0.65, + help='IoU threshold for NMS') + parser.add_argument('--score-threshold', + type=float, + default=0.25, + help='Score threshold for NMS') + args = parser.parse_args() + args.img_size *= 2 if len(args.img_size) == 1 else 1 + return args + + +def build_model_from_cfg(config_path, checkpoint_path, device): + model = init_detector(config_path, checkpoint_path, device=device) + model.eval() + return model + + +def main(): + args = parse_args() + mkdir_or_exist(args.work_dir) + backend = MMYOLOBackend(args.backend.lower()) + if backend in (MMYOLOBackend.ONNXRUNTIME, MMYOLOBackend.OPENVINO, + MMYOLOBackend.TENSORRT8, MMYOLOBackend.TENSORRT7): + if not args.model_only: + print_log('Export ONNX with bbox decoder and NMS ...') + else: + args.model_only = True + print_log(f'Can not export postprocess for {args.backend.lower()}.\n' + f'Set "args.model_only=True" default.') + if args.model_only: + postprocess_cfg = None + output_names = None + else: + postprocess_cfg = ConfigDict(pre_top_k=args.pre_topk, + keep_top_k=args.keep_topk, + iou_threshold=args.iou_threshold, + score_threshold=args.score_threshold) + + output_names = ['num_dets', 'boxes', 'scores', 'labels'] + if args.without_bbox_decoder or args.without_nms: + output_names = ['scores', 'boxes'] + + if args.custom_text is not None and len(args.custom_text) > 0: + with open(args.custom_text) as f: + texts = json.load(f) + texts = [x[0] for x in texts] + else: + from mmdet.datasets import CocoDataset + texts = CocoDataset.METAINFO['classes'] + if args.add_padding: + texts = texts + [' '] + + baseModel = build_model_from_cfg(args.config, args.checkpoint, args.device) + if hasattr(baseModel, 'reparameterize'): + # reparameterize text into YOLO-World + baseModel.reparameterize([texts]) + deploy_model = DeployModel(baseModel=baseModel, + backend=backend, + postprocess_cfg=postprocess_cfg, + with_nms=not args.without_nms, + without_bbox_decoder=args.without_bbox_decoder) + deploy_model.eval() + + fake_input = torch.randn(args.batch_size, 3, + *args.img_size).to(args.device) + # dry run + deploy_model(fake_input) + + save_onnx_path = os.path.join( + args.work_dir, + os.path.basename(args.checkpoint).replace('pth', 'onnx')) + # export onnx + with BytesIO() as f: + torch.onnx.export(deploy_model, + fake_input, + f, + input_names=['images'], + output_names=output_names, + opset_version=args.opset) + f.seek(0) + onnx_model = onnx.load(f) + onnx.checker.check_model(onnx_model) + + # Fix tensorrt onnx output shape, just for view + if not args.model_only and not args.without_nms and backend in ( + MMYOLOBackend.TENSORRT8, MMYOLOBackend.TENSORRT7): + shapes = [ + args.batch_size, 1, args.batch_size, args.keep_topk, 4, + args.batch_size, args.keep_topk, args.batch_size, + args.keep_topk + ] + for i in onnx_model.graph.output: + for j in i.type.tensor_type.shape.dim: + j.dim_param = str(shapes.pop(0)) + if args.simplify: + try: + import onnxsim + onnx_model, check = onnxsim.simplify(onnx_model) + assert check, 'assert check failed' + except Exception as e: + print_log(f'Simplify failure: {e}') + onnx.save(onnx_model, save_onnx_path) + 
print_log(f'ONNX export success, save into {save_onnx_path}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/deploy/onnx_demo.py b/models/YOLO-World/deploy/onnx_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..35f2713ecd695a18c47837d3036022983f75a254 --- /dev/null +++ b/models/YOLO-World/deploy/onnx_demo.py @@ -0,0 +1,235 @@ +import os +import json +import argparse +import os.path as osp + +import cv2 +import numpy as np +import supervision as sv +import onnxruntime as ort +from mmengine.utils import ProgressBar + +try: + import torch + from torchvision.ops import nms +except Exception as e: + print(e) + +BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator(thickness=1) +MASK_ANNOTATOR = sv.MaskAnnotator() + + +class LabelAnnotator(sv.LabelAnnotator): + + @staticmethod + def resolve_text_background_xyxy( + center_coordinates, + text_wh, + position, + ): + center_x, center_y = center_coordinates + text_w, text_h = text_wh + return center_x, center_y, center_x + text_w, center_y + text_h + + +LABEL_ANNOTATOR = LabelAnnotator(text_padding=4, + text_scale=0.5, + text_thickness=1) + + +def parse_args(): + parser = argparse.ArgumentParser('YOLO-World ONNX Demo') + parser.add_argument('onnx', help='onnx file') + parser.add_argument('image', help='image path, include image file or dir.') + parser.add_argument( + 'text', + help= + 'detecting texts (str or json), should be consistent with the ONNX model' + ) + parser.add_argument('--output-dir', + default='./output', + help='directory to save output files') + parser.add_argument('--device', + default='cuda:0', + help='device used for inference') + parser.add_argument( + '--onnx-nms', + action='store_false', + help='whether ONNX model contains NMS and postprocessing') + args = parser.parse_args() + return args + + +def preprocess(image, size=(640, 640)): + h, w = image.shape[:2] + max_size = max(h, w) + scale_factor = size[0] / max_size + pad_h = (max_size - h) // 2 + pad_w = (max_size - w) // 2 + pad_image = np.zeros((max_size, max_size, 3), dtype=image.dtype) + pad_image[pad_h:h + pad_h, pad_w:w + pad_w] = image + image = cv2.resize(pad_image, size, + interpolation=cv2.INTER_LINEAR).astype('float32') + image /= 255.0 + image = image[None] + return image, scale_factor, (pad_h, pad_w) + + +def visualize(image, bboxes, labels, scores, texts): + detections = sv.Detections(xyxy=bboxes, class_id=labels, confidence=scores) + labels = [ + f"{texts[class_id][0]} {confidence:0.2f}" for class_id, confidence in + zip(detections.class_id, detections.confidence) + ] + + image = BOUNDING_BOX_ANNOTATOR.annotate(image, detections) + image = LABEL_ANNOTATOR.annotate(image, detections, labels=labels) + return image + + +def inference(ort_session, + image_path, + texts, + output_dir, + size=(640, 640), + **kwargs): + # normal export + # with NMS and postprocessing + ori_image = cv2.imread(image_path) + h, w = ori_image.shape[:2] + image, scale_factor, pad_param = preprocess(ori_image[:, :, [2, 1, 0]], + size) + input_ort = ort.OrtValue.ortvalue_from_numpy(image.transpose((0, 3, 1, 2))) + results = ort_session.run(["num_dets", "labels", "scores", "boxes"], + {"images": input_ort}) + num_dets, labels, scores, bboxes = results + num_dets = num_dets[0][0] + labels = labels[0, :num_dets] + scores = scores[0, :num_dets] + bboxes = bboxes[0, :num_dets] + + bboxes -= np.array( + [pad_param[1], pad_param[0], pad_param[1], pad_param[0]]) + bboxes /= scale_factor + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, w) + bboxes[:, 1::2] 
= np.clip(bboxes[:, 1::2], 0, h) + bboxes = bboxes.round().astype('int') + + image_out = visualize(ori_image, bboxes, labels, scores, texts) + cv2.imwrite(osp.join(output_dir, osp.basename(image_path)), image_out) + return image_out + + +def inference_with_postprocessing(ort_session, + image_path, + texts, + output_dir, + size=(640, 640), + nms_thr=0.7, + score_thr=0.3, + max_dets=300): + # export with `--without-nms` + ori_image = cv2.imread(image_path) + h, w = ori_image.shape[:2] + image, scale_factor, pad_param = preprocess(ori_image[:, :, [2, 1, 0]], + size) + input_ort = ort.OrtValue.ortvalue_from_numpy(image.transpose((0, 3, 1, 2))) + results = ort_session.run(["scores", "boxes"], {"images": input_ort}) + scores, bboxes = results + # move numpy array to torch + ori_scores = torch.from_numpy(scores[0]).to('cuda:0') + ori_bboxes = torch.from_numpy(bboxes[0]).to('cuda:0') + + scores_list = [] + labels_list = [] + bboxes_list = [] + # class-specific NMS + for cls_id in range(len(texts)): + cls_scores = ori_scores[:, cls_id] + labels = torch.ones(cls_scores.shape[0], dtype=torch.long) * cls_id + keep_idxs = nms(ori_bboxes, cls_scores, iou_threshold=nms_thr) + cur_bboxes = ori_bboxes[keep_idxs] + cls_scores = cls_scores[keep_idxs] + labels = labels[keep_idxs] + scores_list.append(cls_scores) + labels_list.append(labels) + bboxes_list.append(cur_bboxes) + + scores = torch.cat(scores_list, dim=0) + labels = torch.cat(labels_list, dim=0) + bboxes = torch.cat(bboxes_list, dim=0) + + keep_idxs = scores > score_thr + scores = scores[keep_idxs] + labels = labels[keep_idxs] + bboxes = bboxes[keep_idxs] + if len(keep_idxs) > max_dets: + _, sorted_idx = torch.sort(scores, descending=True) + keep_idxs = sorted_idx[:max_dets] + bboxes = bboxes[keep_idxs] + scores = scores[keep_idxs] + labels = labels[keep_idxs] + + # Get candidate predict info by num_dets + scores = scores.cpu().numpy() + bboxes = bboxes.cpu().numpy() + labels = labels.cpu().numpy() + + bboxes -= np.array( + [pad_param[1], pad_param[0], pad_param[1], pad_param[0]]) + bboxes /= scale_factor + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, w) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, h) + bboxes = bboxes.round().astype('int') + + image_out = visualize(ori_image, bboxes, labels, scores, texts) + cv2.imwrite(osp.join(output_dir, osp.basename(image_path)), image_out) + return image_out + + +def main(): + + args = parse_args() + onnx_file = args.onnx + # init ONNX session + ort_session = ort.InferenceSession( + onnx_file, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) + print("Init ONNX Runtime session") + output_dir = "onnx_outputs" + if not osp.exists(output_dir): + os.mkdir(output_dir) + + # load images + if not osp.isfile(args.image): + images = [ + osp.join(args.image, img) for img in os.listdir(args.image) + if img.endswith('.png') or img.endswith('.jpg') + ] + else: + images = [args.image] + + if args.text.endswith('.txt'): + with open(args.text) as f: + lines = f.readlines() + texts = [[t.rstrip('\r\n')] for t in lines] + elif args.text.endswith('.json'): + texts = json.load(open(args.text)) + else: + texts = [[t.strip()] for t in args.text.split(',')] + + print("Start to inference.") + progress_bar = ProgressBar(len(images)) + + if args.onnx_nms: + inference_func = inference + else: + inference_func = inference_with_postprocessing + + for img in images: + inference_func(ort_session, img, texts, output_dir=output_dir) + progress_bar.update() + print("Finish inference") + + +if __name__ == "__main__": + main() 
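+
+# --- Illustrative, optional helper (not part of the original script) ----------
+# The `--without-nms` export path above relies on `torchvision.ops.nms`, while the
+# torch import at the top of this file is wrapped in try/except. If torch is not
+# available, a pure-NumPy greedy NMS such as the sketch below could be substituted;
+# the function name and default threshold are assumptions, not repository code.
+def numpy_nms(boxes, scores, iou_thr=0.7):
+    """Greedy NMS over (N, 4) xyxy boxes; returns kept indices, best score first."""
+    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
+    areas = (x2 - x1) * (y2 - y1)
+    order = scores.argsort()[::-1]
+    keep = []
+    while order.size > 0:
+        i = order[0]
+        keep.append(i)
+        # intersection of the current best box with all remaining boxes
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+        iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-7)
+        # drop boxes that overlap the kept box too much
+        order = order[1:][iou <= iou_thr]
+    return np.asarray(keep, dtype=np.int64)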
diff --git a/models/YOLO-World/deploy/tflite_demo.py b/models/YOLO-World/deploy/tflite_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..ae5bf1a7013d07eef032917391bfa20caede8395 --- /dev/null +++ b/models/YOLO-World/deploy/tflite_demo.py @@ -0,0 +1,254 @@ +import os +import json +import argparse +import os.path as osp + +import cv2 +import tqdm +import torch +import numpy as np +import tensorflow as tf +import supervision as sv +from torchvision.ops import nms + +BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator(thickness=1) +MASK_ANNOTATOR = sv.MaskAnnotator() + + +class LabelAnnotator(sv.LabelAnnotator): + + @staticmethod + def resolve_text_background_xyxy( + center_coordinates, + text_wh, + position, + ): + center_x, center_y = center_coordinates + text_w, text_h = text_wh + return center_x, center_y, center_x + text_w, center_y + text_h + + +LABEL_ANNOTATOR = LabelAnnotator(text_padding=4, + text_scale=0.5, + text_thickness=1) + + +def parse_args(): + parser = argparse.ArgumentParser('YOLO-World TFLite (INT8) Demo') + parser.add_argument('path', help='TFLite Model `.tflite`') + parser.add_argument('image', help='image path, include image file or dir.') + parser.add_argument( + 'text', + help= + 'detecting texts (str, txt, or json), should be consistent with the ONNX model' + ) + parser.add_argument('--output-dir', + default='./output', + help='directory to save output files') + args = parser.parse_args() + return args + + +def preprocess(image, size=(640, 640)): + h, w = image.shape[:2] + max_size = max(h, w) + scale_factor = size[0] / max_size + pad_h = (max_size - h) // 2 + pad_w = (max_size - w) // 2 + pad_image = np.zeros((max_size, max_size, 3), dtype=image.dtype) + pad_image[pad_h:h + pad_h, pad_w:w + pad_w] = image + image = cv2.resize(pad_image, size, + interpolation=cv2.INTER_LINEAR).astype('float32') + image /= 255.0 + image = image[None] + return image, scale_factor, (pad_h, pad_w) + + +def generate_anchors_per_level(feat_size, stride, offset=0.5): + h, w = feat_size + shift_x = (torch.arange(0, w) + offset) * stride + shift_y = (torch.arange(0, h) + offset) * stride + yy, xx = torch.meshgrid(shift_y, shift_x) + anchors = torch.stack([xx, yy]).reshape(2, -1).transpose(0, 1) + return anchors + + +def generate_anchors(feat_sizes=[(80, 80), (40, 40), (20, 20)], + strides=[8, 16, 32], + offset=0.5): + anchors = [ + generate_anchors_per_level(fs, s, offset) + for fs, s in zip(feat_sizes, strides) + ] + anchors = torch.cat(anchors) + return anchors + + +def simple_bbox_decode(points, pred_bboxes, stride): + + pred_bboxes = pred_bboxes * stride[None, :, None] + x1 = points[..., 0] - pred_bboxes[..., 0] + y1 = points[..., 1] - pred_bboxes[..., 1] + x2 = points[..., 0] + pred_bboxes[..., 2] + y2 = points[..., 1] + pred_bboxes[..., 3] + bboxes = torch.stack([x1, y1, x2, y2], -1) + + return bboxes + + +def visualize(image, bboxes, labels, scores, texts): + detections = sv.Detections(xyxy=bboxes, class_id=labels, confidence=scores) + labels = [ + f"{texts[class_id][0]} {confidence:0.2f}" for class_id, confidence in + zip(detections.class_id, detections.confidence) + ] + + image = BOUNDING_BOX_ANNOTATOR.annotate(image, detections) + image = LABEL_ANNOTATOR.annotate(image, detections, labels=labels) + return image + + +def inference_per_sample(interp, + image_path, + texts, + priors, + strides, + output_dir, + size=(640, 640), + vis=False, + score_thr=0.05, + nms_thr=0.3, + max_dets=300): + + # input / output details from TFLite + input_details = 
interp.get_input_details() + output_details = interp.get_output_details() + + # load image from path + ori_image = cv2.imread(image_path) + h, w = ori_image.shape[:2] + image, scale_factor, pad_param = preprocess(ori_image[:, :, [2, 1, 0]], + size) + + # inference + interp.set_tensor(input_details[0]['index'], image) + interp.invoke() + + scores = interp.get_tensor(output_details[1]['index']) + bboxes = interp.get_tensor(output_details[0]['index']) + + # can be converted to numpy for other devices + # using torch here is only for references. + ori_scores = torch.from_numpy(scores[0]) + ori_bboxes = torch.from_numpy(bboxes) + + # decode bbox cordinates with priors + decoded_bboxes = simple_bbox_decode(priors, ori_bboxes, strides)[0] + scores_list = [] + labels_list = [] + bboxes_list = [] + for cls_id in range(len(texts)): + cls_scores = ori_scores[:, cls_id] + labels = torch.ones(cls_scores.shape[0], dtype=torch.long) * cls_id + keep_idxs = nms(decoded_bboxes, cls_scores, iou_threshold=0.5) + cur_bboxes = decoded_bboxes[keep_idxs] + cls_scores = cls_scores[keep_idxs] + labels = labels[keep_idxs] + scores_list.append(cls_scores) + labels_list.append(labels) + bboxes_list.append(cur_bboxes) + + scores = torch.cat(scores_list, dim=0) + labels = torch.cat(labels_list, dim=0) + bboxes = torch.cat(bboxes_list, dim=0) + + keep_idxs = scores > score_thr + scores = scores[keep_idxs] + labels = labels[keep_idxs] + bboxes = bboxes[keep_idxs] + # only for visualization, add an extra NMS + keep_idxs = nms(bboxes, scores, iou_threshold=nms_thr) + num_dets = min(len(keep_idxs), max_dets) + bboxes = bboxes[keep_idxs].unsqueeze(0) + scores = scores[keep_idxs].unsqueeze(0) + labels = labels[keep_idxs].unsqueeze(0) + + scores = scores[0, :num_dets].numpy() + bboxes = bboxes[0, :num_dets].numpy() + labels = labels[0, :num_dets].numpy() + + bboxes -= np.array( + [pad_param[1], pad_param[0], pad_param[1], pad_param[0]]) + bboxes /= scale_factor + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, w) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, h) + + if vis: + image_out = visualize(ori_image, bboxes, labels, scores, texts) + cv2.imwrite(osp.join(output_dir, osp.basename(image_path)), image_out) + print(f"detecting {num_dets} objects.") + return image_out, ori_scores, ori_bboxes[0] + else: + return bboxes, labels, scores + + +def main(): + + args = parse_args() + tflite_file = args.tflite + # init ONNX session + interpreter = tf.lite.Interpreter(model_path=tflite_file, + experimental_preserve_all_tensors=True) + interpreter.allocate_tensors() + print("Init TFLite Interpter") + output_dir = "onnx_outputs" + if not osp.exists(output_dir): + os.mkdir(output_dir) + + # load images + if not osp.isfile(args.image): + images = [ + osp.join(args.image, img) for img in os.listdir(args.image) + if img.endswith('.png') or img.endswith('.jpg') + ] + else: + images = [args.image] + + if args.text.endswith('.txt'): + with open(args.text) as f: + lines = f.readlines() + texts = [[t.rstrip('\r\n')] for t in lines] + elif args.text.endswith('.json'): + texts = json.load(open(args.text)) + else: + texts = [[t.strip()] for t in args.text.split(',')] + + size = (640, 640) + strides = [8, 16, 32] + + # prepare anchors, since TFLite models does not contain anchors, due to INT8 quantization. 
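+    # The quantized TFLite graph only returns per-cell class scores and
+    # stride-normalized (left, top, right, bottom) distances, so the grid-point
+    # priors and per-level strides are rebuilt on the host here and handed to
+    # inference_per_sample(), which decodes boxes via simple_bbox_decode().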
+ featmap_sizes = [(size[0] // s, size[1] // s) for s in strides] + flatten_priors = generate_anchors(featmap_sizes, strides=strides) + mlvl_strides = [ + flatten_priors.new_full((featmap_size[0] * featmap_size[1] * 1, ), + stride) + for featmap_size, stride in zip(featmap_sizes, strides) + ] + flatten_strides = torch.cat(mlvl_strides) + + print("Start to inference.") + for img in tqdm.tqdm(images): + inference_per_sample(interpreter, + img, + texts, + flatten_priors[None], + flatten_strides, + output_dir=output_dir, + vis=True, + score_thr=0.3, + nms_thr=0.5) + print("Finish inference") + + +if __name__ == "__main__": + main() diff --git a/models/YOLO-World/docs/data.md b/models/YOLO-World/docs/data.md new file mode 100644 index 0000000000000000000000000000000000000000..9e792f63da4f4cf73cd0c28ea3ccdd0b0b309e1d --- /dev/null +++ b/models/YOLO-World/docs/data.md @@ -0,0 +1,124 @@ +## Preparing Data for YOLO-World + +### Overview + +For pre-training YOLO-World, we adopt several datasets as listed in the below table: + +| Data | Samples | Type | Boxes | +| :-- | :-----: | :---:| :---: | +| Objects365v1 | 609k | detection | 9,621k | +| GQA | 621k | grounding | 3,681k | +| Flickr | 149k | grounding | 641k | +| CC3M-Lite | 245k | image-text | 821k | + +### Dataset Directory + +We put all data into the `data` directory, such as: + +```bash +├── coco +│ ├── annotations +│ ├── lvis +│ ├── train2017 +│ ├── val2017 +├── flickr +│ ├── annotations +│ └── images +├── mixed_grounding +│ ├── annotations +│ ├── images +├── mixed_grounding +│ ├── annotations +│ ├── images +├── objects365v1 +│ ├── annotations +│ ├── train +│ ├── val +``` +**NOTE**: We strongly suggest that you check the directories or paths in the dataset part of the config file, especially for the values `ann_file`, `data_root`, and `data_prefix`. + +We provide the annotations of the pre-training data in the below table: + +| Data | images | Annotation File | +| :--- | :------| :-------------- | +| Objects365v1 | [`Objects365 train`](https://opendatalab.com/OpenDataLab/Objects365_v1) | [`objects365_train.json`](https://opendatalab.com/OpenDataLab/Objects365_v1) | +| MixedGrounding | [`GQA`](https://nlp.stanford.edu/data/gqa/images.zip) | [`final_mixed_train_no_coco.json`](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations/final_mixed_train_no_coco.json) | +| Flickr30k | [`Flickr30k`](https://shannon.cs.illinois.edu/DenotationGraph/) |[`final_flickr_separateGT_train.json`](https://huggingface.co/GLIPModel/GLIP/tree/main/mdetr_annotations/final_flickr_separateGT_train.json) | +| LVIS-minival | [`COCO val2017`](https://cocodataset.org/) | [`lvis_v1_minival_inserted_image_name.json`](https://huggingface.co/GLIPModel/GLIP/blob/main/lvis_v1_minival_inserted_image_name.json) | + +**Acknowledgement:** We sincerely thank [GLIP](https://github.com/microsoft/GLIP) and [mdetr](https://github.com/ashkamath/mdetr) for providing the annotation files for pre-training. + + +### Dataset Class + +> For fine-tuning YOLO-World on Close-set Object Detection, using `MultiModalDataset` is recommended. + +#### Setting CLASSES/Categories + +If you use `COCO-format` custom datasets, you "DO NOT" need to define a dataset class for custom vocabularies/categories. 
+Explicitly setting the CLASSES in the config file through `metainfo=dict(classes=your_classes),` is simple: + +```python + +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + metainfo=dict(classes=your_classes), + data_root='data/your_data', + ann_file='annotations/your_annotation.json', + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/your_class_texts.json', + pipeline=train_pipeline) +``` + + +For training YOLO-World, we mainly adopt two kinds of dataset classs: + +#### 1. `MultiModalDataset` + +`MultiModalDataset` is a simple wrapper for pre-defined Dataset Class, such as `Objects365` or `COCO`, which add the texts (category texts) into the dataset instance for formatting input texts. + +**Text JSON** + +The json file is formatted as follows: + +```json +[ + ['A_1','A_2'], + ['B'], + ['C_1', 'C_2', 'C_3'], + ... +] +``` + +We have provided the text json for [`LVIS`](./../data/texts/lvis_v1_class_texts.json), [`COCO`](../data/texts/coco_class_texts.json), and [`Objects365`](../data/texts/obj365v1_class_texts.json) + +#### 2. `YOLOv5MixedGroundingDataset` + +The `YOLOv5MixedGroundingDataset` extends the `COCO` dataset by supporting loading texts/captions from the json file. It's desgined for `MixedGrounding` or `Flickr30K` with text tokens for each object. + + + +### 🔥 Custom Datasets + +For custom dataset, we suggest the users convert the annotation files according to the usage. Note that, converting the annotations to the **standard COCO format** is basically required. + +1. **Large vocabulary, grounding, referring:** you can follow the annotation format as the `MixedGrounding` dataset, which adds `caption` and `tokens_positive` for assigning the text for each object. The texts can be a category or a noun phrases. + +2. **Custom vocabulary (fixed):** you can adopt the `MultiModalDataset` wrapper as the `Objects365` and create a **text json** for your custom categories. + + +### CC3M Pseudo Annotations + +The following annotations are generated according to the automatic labeling process in our paper. Adn we report the results based on these annotations. + +To use CC3M annotations, you need to prepare the `CC3M` images first. + +| Data | Images | Boxes | File | +| :--: | :----: | :---: | :---: | +| CC3M-246K | 246,363 | 820,629 | [Download 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/cc3m_pseudo_annotations.json) | +| CC3M-500K | 536,405 | 1,784,405| [Download 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/cc3m_pseudo_500k_annotations.json) | +| CC3M-750K | 750,000 | 4,504,805 | [Download 🤗](https://huggingface.co/wondervictor/YOLO-World/blob/main/cc3m_pseudo_750k_annotations.json) | \ No newline at end of file diff --git a/models/YOLO-World/docs/deploy.md b/models/YOLO-World/docs/deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..b55d64c96b164a23fdf9ecad3e3dcdb288b253a0 --- /dev/null +++ b/models/YOLO-World/docs/deploy.md @@ -0,0 +1,59 @@ +## Deploy YOLO-World + +- [x] ONNX export +- [x] ONNX demo +- [ ] TensorRT +- [ ] TFLite + +We provide several ways to deploy YOLO-World with ONNX or TensorRT + +### Priliminaries + +```bash +pip install supervision onnx onnxruntime onnxsim +``` + +### Export ONNX on Gradio Demo + +start the `demo.py` and you can modify the texts in the demo and output the ONNX model. 
+ +```bash +python demo.py path/to/config path/to/weights +``` + +### Export YOLO-World to ONNX models + +You can also use [`export_onnx.py`](../deploy/export_onnx.py) to obtain the ONNX model. You might specify the `--custom-text` with your own `Text JSON` for your custom prompts. The format of `Text JSON` can be found in [`docs/data`](../docs/data.md). + +```bash +PYTHONPATH=./ python deploy/export_onnx.py path/to/config path/to/weights --custom-text path/to/customtexts --opset 11 +``` + +If you don't want to include `NMS` or "post-processing" into the ONNX model, you can add `--without-nms` +```bash +PYTHONPATH=./ python deploy/export_onnx.py path/to/config path/to/weights --custom-text path/to/customtexts --opset 11 --without-nms +``` + +If you want to quantize YOLO-World with ONNX model, you'd better remove `NMS` and `bbox_decoder` by adding `--without-bbox-decoder` + +```bash +PYTHONPATH=./ python deploy/export_onnx.py path/to/config path/to/weights --custom-text path/to/customtexts --opset 11 --without-bbox-decoder +``` + +**Running ONNX demo** + +```bash +python deploy/onnx_demo.py path/to/model.onnx path/to/images path/to/texts +``` + + +### Export YOLO-World to TensorRT models + +coming soon. + +### FAQ + +**Q1**. `RuntimeError: Exporting the operator einsum to ONNX opset version 11 is not supported. Support for this operator was added in version 12, try exporting with this version.` + +**A:** This error arises because YOLO-World adopts `einsum` for matrix multiplication while it is not supported by `opset 11`. You can set the `--opset` from `11` to `12` if your device supports or change the `einsum` to normal `permute/reshape/multiplication` by set `use_einsum=False` in the `MaxSigmoidCSPLayerWithTwoConv` and `YOLOWorldHeadModule`. You can refer to the [sample config](../configs/pretrain/yolo_world_v2_m_vlpan_bn_noeinsum_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py) without einsum. + diff --git a/models/YOLO-World/docs/faq.md b/models/YOLO-World/docs/faq.md new file mode 100644 index 0000000000000000000000000000000000000000..48817d21558e2b481577b4dea4a74876b3e14e59 --- /dev/null +++ b/models/YOLO-World/docs/faq.md @@ -0,0 +1,9 @@ +## Frequently Asked Questions (FAQ) + + +1. ` Incorrect path_or_model_id` +```bash +OSError: class `YOLOWorldDetector` in yolo_world/models/detectors/yolo_world.py: class `MultiModalYOLOBackbone` in yolo_world/models/backbones/mm_backbone.py: class `HuggingCLIPLanguageBackbone` in yolo_world/models/backbones/mm_backbone.py: Incorrect path_or_model_id: '../pretrained_models/clip-vit-base-patch32-projection'. Please provide either the path to a local folder or the repo_id of a model on the Hub. +``` + +**Solution:** \ No newline at end of file diff --git a/models/YOLO-World/docs/finetuning.md b/models/YOLO-World/docs/finetuning.md new file mode 100644 index 0000000000000000000000000000000000000000..d128369f0c5b50f81917becd6c696f46797f452c --- /dev/null +++ b/models/YOLO-World/docs/finetuning.md @@ -0,0 +1,105 @@ +## Fine-tuning YOLO-World + +Fine-tuning YOLO-World is easy and we provide the samples for COCO object detection as a simple guidance. + + +### Fine-tuning Requirements + +Fine-tuning YOLO-World is cheap: + +* it does not require 32 GPUs for multi-node distributed training. **8 GPUs or even 1 GPU** is enough. + +* it does not require the long schedule, *e.g.,* 300 epochs or 500 epochs for training YOLOv5 or YOLOv8. **80 epochs or fewer** is enough considering that we provide the good pre-trained weights. 
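+
+For reference, a minimal launch could look like the sketch below. The config path is the COCO fine-tuning config referenced later in this document, and `tools/train.py` / `dist_train.sh` are assumed to follow the usual OpenMMLab entry-point layout; adjust both to your local setup.
+
+```bash
+# single GPU (assumed standard MMYOLO-style entry point)
+python tools/train.py configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py --amp
+
+# 8 GPUs (assumed launcher signature: <config> <num_gpus>)
+./dist_train.sh configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py 8 --amp
+```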
+ +### Data Preparation + +The fine-tuning dataset should have the similar format as the that of the pre-training dataset. +We suggest you refer to [`docs/data`](./data.md) for more details about how to build the datasets: + +* if you fine-tune YOLO-World for close-set / custom vocabulary object detection, using `MultiModalDataset` with a `text json` is preferred. + +* if you fine-tune YOLO-World for open-vocabulary detection with rich texts or grounding tasks, using `MixedGroundingDataset` is preferred. + +### Hyper-parameters and Config + +Please refer to the [config for fine-tuning YOLO-World-L on COCO](../configs/finetune_coco/yolo_world_l_dual_vlpan_2e-4_80e_8gpus_finetune_coco.py) for more details. + +1. Basic config file: + +If the fine-tuning dataset **contains mask annotations**: + +```python +_base_ = ('../../third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py') +``` + +If the fine-tuning dataset **doesn't contain mask annotations**: + +```python +_base_ = ('../../third_party/mmyolo/configs/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco.py') +``` + +2. Training Schemes: + +Reducing the epochs and adjusting the learning rate + +```python +max_epochs = 80 +base_lr = 2e-4 +weight_decay = 0.05 +train_batch_size_per_gpu = 16 +close_mosaic_epochs=10 + +train_cfg = dict( + max_epochs=max_epochs, + val_interval=5, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) + +``` + +3. Datasets: + +```python +coco_train_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict( + type='YOLOv5CocoDataset', + data_root='data/coco', + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/coco_class_texts.json', + pipeline=train_pipeline) +``` + +#### Finetuning without RepVL-PAN or Text Encoder 🚀 + +For further efficiency and simplicity, we can fine-tune an efficient version of YOLO-World without RepVL-PAN and the text encoder. +The efficient version of YOLO-World has the similar architecture or layers with the orignial YOLOv8 but we provide the pre-trained weights on large-scale datasets. +The pre-trained YOLO-World has strong generalization capabilities and is more robust compared to YOLOv8 trained on the COCO dataset. + +You can refer to the [config for Efficient YOLO-World](./../configs/finetune_coco/yolo_world_l_efficient_neck_2e-4_80e_8gpus_finetune_coco.py) for more details. + +The efficient YOLO-World adopts `EfficientCSPLayerWithTwoConv` and the text encoder can be removed during inference or exporting models. + +```python + +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='EfficientCSPLayerWithTwoConv'))) + +``` + +### Launch Fine-tuning! + +It's easy: + +```bash +./dist_train.sh --amp +``` diff --git a/models/YOLO-World/docs/installation.md b/models/YOLO-World/docs/installation.md new file mode 100644 index 0000000000000000000000000000000000000000..52befc1f7801fce2d3b506cbf9f0067761caff53 --- /dev/null +++ b/models/YOLO-World/docs/installation.md @@ -0,0 +1,41 @@ +## Installation Guide + +We provide the `requirements` files in [./requirements](./../requirements/): + +* `basic_requirements`: training, finetuning, evaluation. +* `demo_requirements`: running YOLO-World [demos](./../demo/). 
+* `onnx_requirements`: converting YOLO-World to ONNX or TFLite models (TFLite is coming soon). + +#### Install `MMCV` + +YOLO-World adopts `mmcv>=2.0.0`. There are several ways to install `mmcv` + +**1. using `openmim`**: + +see more in [official guide](https://github.com/open-mmlab/mmcv/tree/master?tab=readme-ov-file#install-mmcv-full). + +```bash +pip install openmim +mim install mmcv==2.0.0 +``` + +**2. using `pip`**: + +go to [install-with-pip](https://mmcv.readthedocs.io/en/latest/get_started/installation.html#install-with-pip) to select the pip index. + +```bash +# cuda=11.3, torch=1.11 +pip install mmcv==2.0.0 -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.11/index.html +# cuda=11.7, torch=1.13 +pip install mmcv==2.2.0 -f https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html +# cuda=12.1, torch=2.1 +pip install mmcv==2.1.0 -f https://download.openmmlab.com/mmcv/dist/cu121/torch2.1/index.html +``` + +**3. using `whl`** + +go to [index packages](https://download.openmmlab.com/mmcv/dist/cu117/torch1.13/index.html) to find a suitable version and download. + +```bash +pip install mmcv-2.0.1-cp38-cp38-manylinux1_x86_64.whl +``` \ No newline at end of file diff --git a/models/YOLO-World/docs/prompt_yolo_world.md b/models/YOLO-World/docs/prompt_yolo_world.md new file mode 100644 index 0000000000000000000000000000000000000000..c7ba81c99a8226f6b31c256a036967c39252d982 --- /dev/null +++ b/models/YOLO-World/docs/prompt_yolo_world.md @@ -0,0 +1,73 @@ +## Prompt YOLO-World + + +### 1. Simple YOLO-World with Embeddings + +For simplifying YOLO-World and get rid of the language model, we define a new basic detector `YOLOWorldPromptDetector`: + +The `YOLOWorldPromptDetector` supports prompt embeddings as the input and doesn't not contain a language model anymore! +Now, YOLO-World adopts `embeddings` as language inputs, and the embeddings support several kinds: (1) text embeddings from the language model, e.g., CLIP language encoder, (2) image embeddings from a vision model, e.g., CLIP vision encoder, and (3) image-text fused embeddings, and (4) random embeddings. +The (1)(2)(3) supports zero-shot inference and (4), including (1)(2)(3) are designed for prompt tuning on your custom data. + +The basic detector is defined as follows: + +```python +class YOLOWorldPromptDetector(YOLODetector): + """Implementation of YOLO World Series""" + + def __init__(self, + *args, + mm_neck: bool = False, + num_train_classes=80, + num_test_classes=80, + prompt_dim=512, + num_prompts=80, + embedding_path='', + freeze_prompt=False, + use_mlp_adapter=False, + **kwargs) +``` + +To use it in a zero-shot manner, you need to pre-compute the text embeddings (image embeddings) and save it as a `numpy array (*.npy)` with a `NxD` shape (N is the number of prompts and D is the dimension of the embeddings). Currently, we only support one prompt for one class. You can use several prompts for one class but you need to merge the results in the post-processing steps. + + +### 2. Prompt Tuning YOLO-World + +We introduce prompt tuning for YOLO-World to maintain the zero-shot ability while improve the performance on your custom datasets. + +For more details about writing configs for prompt tuning, you can refer to [`prompt tuning for COCO data`](./../configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py). + +1. 
Use random prompts + +```python +dict(type='YOLOWorldPromptDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + prompt_dim=text_channels, + num_prompts=80, + ...) +``` + +2. Use CLIP embeddings (text, image, or text-image embeddings) + +the `clip_vit_b32_coco_80_embeddings.npy` can be downloaded at [HuggingFace](https://huggingface.co/wondervictor/YOLO-World/blob/main/clip_vit_b32_coco_80_embeddings.npy). + +```python +dict(type='YOLOWorldPromptDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + embedding_path='embeddings/clip_vit_b32_coco_80_embeddings.npy', + prompt_dim=text_channels, + num_prompts=80, + ...) +``` + +Using CLIP model to obtains the image and text embeddings will maintain the zero-shot performace. + + +| Model | Config | AP | AP50 | AP75 | APS | APM | APL | +| :---- | :----: | :--: | :--: | :---: | :-: | :-: | :-: | +| YOLO-World-v2-L | Zero-shot | 45.7 | 61.6 | 49.8 | 29.9 | 50.0 | 60.8 | +| [YOLO-World-v2-L](./../configs/prompt_tuning_coco/yolo_world_v2_l_vlpan_bn_2e-4_80e_8gpus_mask-refine_prompt_tuning_coco.py) | Prompt tuning | 47.9 | 64.3 | 52.5 | 31.9 | 52.6 | 61.3 | diff --git a/models/YOLO-World/docs/reparameterize.md b/models/YOLO-World/docs/reparameterize.md new file mode 100644 index 0000000000000000000000000000000000000000..9115783d086812c94bf97320933d7ddae1c3847e --- /dev/null +++ b/models/YOLO-World/docs/reparameterize.md @@ -0,0 +1,77 @@ +## Reparameterize YOLO-World + +The reparameterization incorporates text embeddings as parameters into the model. For example, in the final classification layer, text embeddings are reparameterized into a simple 1x1 convolutional layer. + +
+ +
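+
+As a rough sketch of the idea (not the repository's actual module; all names and shapes below are assumptions), the class text embeddings of shape `N x D` can be folded into the weight of a 1x1 convolution, so class scores come from a plain convolution over the `D`-dimensional image features instead of an explicit embedding matmul:
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+@torch.no_grad()
+def fold_text_embeddings_into_conv(text_embeds: torch.Tensor) -> nn.Conv2d:
+    """Fold (N, D) class text embeddings into a 1x1 conv over D-channel features."""
+    num_classes, embed_dim = text_embeds.shape
+    conv = nn.Conv2d(embed_dim, num_classes, kernel_size=1, bias=False)
+    # each output channel's 1x1 kernel is one (unit-normalized) class embedding
+    conv.weight.copy_(F.normalize(text_embeds, dim=-1).view(num_classes, embed_dim, 1, 1))
+    return conv
+
+
+# usage: cls_logits = fold_text_embeddings_into_conv(embeds)(image_feats)  # (B, N, H, W)
+```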
+ +### Key Advantages from Reparameterization + +> Reparameterized YOLO-World still has zero-shot ability! + +* **Efficiency:** reparameterized YOLO-World has a simple and efficient archtecture, e.g., `conv1x1` is faster than `transpose & matmul`. In addition, it enables further optmization for deployment. + +* **Accuracy:** reparameterized YOLO-World supports fine-tuning. Compared to the normal `fine-tuning` or `prompt tuning`, **reparameterized version can optimize the `neck` and `head` independently** since the `neck` and `head` have different parameters and do not depend on `text embeddings` anymore! +For example, fine-tuning the **reparameterized YOLO-World** obtains *46.3 AP* on COCO *val2017* while fine-tuning the normal version obtains *46.1 AP*, with all hyper-parameters kept the same. + +### Getting Started + +#### 1. Prepare cutstom text embeddings + +You need to generate the text embeddings by [`toos/generate_text_prompts.py`](../tools/generate_text_prompts.py) and save it as a `numpy.array` with shape `NxD`. + +#### 2. Reparameterizing + +Reparameterizing will generate a new checkpoint with text embeddings! + +Check those files first: + +* model checkpoint +* text embeddings + +We mainly reparameterize two groups of modules: + +* head (`YOLOWorldHeadModule`) +* neck (`MaxSigmoidCSPLayerWithTwoConv`) + +```bash +python tools/reparameterize_yoloworld.py \ + --model path/to/checkpoint \ + --out-dir path/to/save/re-parameterized/ \ + --text-embed path/to/text/embeddings \ + --conv-neck +``` + + +#### 3. Prepare the model config + +Please see the sample config: [`finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py`](../configs/finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) for reparameterized training. + + +* `RepConvMaxSigmoidCSPLayerWithTwoConv`: + +```python +neck=dict(type='YOLOWorldPAFPN', + guide_channels=num_classes, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='RepConvMaxSigmoidCSPLayerWithTwoConv', + guide_channels=num_classes)), +``` + +* `RepYOLOWorldHeadModule`: + +```python +bbox_head=dict(head_module=dict(type='RepYOLOWorldHeadModule', + embed_dims=text_channels, + num_guide=num_classes, + num_classes=num_classes)), + +``` + +#### 4. Reparameterized Training + +**Reparameterized YOLO-World** is easier to fine-tune and can be treated as an enhanced and pre-trained YOLOv8! + +You can check [`finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py`](../configs/finetune_coco/yolo_world_v2_s_rep_vlpan_bn_2e-4_80e_8gpus_mask-refine_finetune_coco.py) for more details. \ No newline at end of file diff --git a/models/YOLO-World/docs/tflite_deploy.md b/models/YOLO-World/docs/tflite_deploy.md new file mode 100644 index 0000000000000000000000000000000000000000..4cdadffbadb54dee49e46b77a4bc118e1fb16631 --- /dev/null +++ b/models/YOLO-World/docs/tflite_deploy.md @@ -0,0 +1,78 @@ +## Run YOLO-World (Quantized) on TF-Lite + +- [x] Export YOLO-World to TFLite with INT8 Quantization. +- [x] TFLite demo + +### Priliminaries + +```bash +pip install onnxruntime onnx onnx-simplifier +pip install tensorflow==2.15.1 +``` + +See [onnx2tf](https://github.com/PINTO0309/onnx2tf) for more details about export TFLite models. +The contributor of `onnx2tf` is very nice! + +### Export TFLite INT8 Quantization models + +Please use **Reparameterized YOLO-World** for TFLite!! + +1. 
Prepare the ONNX model + +Please export the ONNX model without `postprocessing` and `bbox_decoder`, just add `--without-bbox-decoder`! +`bbox_decoder` is not supported for INT8 quantization, please take care! + +```bash +PYTHONPATH=./ python deploy/export_onnx.py path/to/config path/to/weights --custom-text path/to/customtexts --opset 11 --without-bbox-decoder +``` + +2. Generate the calibration samples + +Using 100 COCO images is suggested to create a simple calibration dataset for quantization. + +```python +import os +import random +from PIL import Image, ImageOps +import cv2 +import glob +import numpy as np + +root = "data/coco/val2017/" +image_list = os.listdir(root) +image_list = [os.path.join(root, f) for f in image_list] +random.shuffle(image_list) + +img_datas = [] +for idx, file in enumerate(image_list[:100]): + image = Image.open(file).convert('RGB') + # Get sample input data as a numpy array in a method of your choosing. + img_width, img_height = image.size + size = max(img_width, img_height) + image = ImageOps.pad(image, (size, size), method=Image.BILINEAR) + image = image.resize((640, 640), Image.BILINEAR) + tensor_image = np.asarray(image).astype(np.float32) + tensor_image /= 255.0 + tensor_image = np.expand_dims(tensor_image, axis=0) + img_datas.append(tensor_image) + +calib_datas = np.vstack(img_datas) +print(f'calib_datas.shape: {calib_datas.shape}') +np.save(file='tflite_calibration_data_100_images_640.npy', arr=calib_datas) + +``` + +3. Export ONNX to TFLite using `onnx2tf` + +```bash +onnx2tf -i [ONNX] -o [OUTPUT] -oiqt -cind "images" "tflite_calibration_data_100_images_640.npy" "[[[[0.,0.,0.]]]]" "[[[[1.,1.,1.]]]]" -onimc "scores" "bboxes" --verbosity debug +``` + +We provide a sample TFLite INT8 model: [yolo_world_x_coco_zeroshot_rep_integer_quant.tflite](https://huggingface.co/wondervictor/YOLO-World/blob/main/yolo_x_coco_zeroshot_rep_integer_quant.tflite) + +### Inference using TFLite + +```bash +python deploy/tflite_demo.py path/to/tflite path/to/images path/to/texts + +``` \ No newline at end of file diff --git a/models/YOLO-World/docs/updates.md b/models/YOLO-World/docs/updates.md new file mode 100644 index 0000000000000000000000000000000000000000..4605c767af577df857e9cf09cf8098a1ffb4ba7c --- /dev/null +++ b/models/YOLO-World/docs/updates.md @@ -0,0 +1,14 @@ +## Update Notes + +We provide the details for important updates of YOLO-World in this note. + +### Model Architecture + +**[2024-2-29]:** YOLO-World-v2: + +1. We remove the `I-PoolingAttention`: though it improves the performance for zero-shot LVIS evaluation, it affects the inference speeds after exporting YOLO-World to ONNX or TensorRT. Considering the trade-off, we remove the `I-PoolingAttention` in the newest version. +2. We replace the `L2-Norm` in the contrastive head with the `BatchNorm`. The `L2-Norm` contains complex operations, such as `reduce`, which is time-consuming for deployment. However, the `BatchNorm` can be fused into the convolution, which is much more efficient and also improves the zero-shot performance. 
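+
+For illustration only (module and attribute names below are assumptions, not the repository's code), a BatchNorm-based contrastive head looks roughly like this: the `BatchNorm2d` over region features replaces the explicit L2 normalization, and its affine transform can later be fused into the preceding convolution for deployment:
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class BNContrastiveHeadSketch(nn.Module):
+    """Similarity between image features (B, D, H, W) and text embeddings (N, D)."""
+
+    def __init__(self, embed_dim: int):
+        super().__init__()
+        self.norm = nn.BatchNorm2d(embed_dim)  # fusable into the preceding conv
+        self.bias = nn.Parameter(torch.zeros(1))
+        self.logit_scale = nn.Parameter(torch.ones(1))
+
+    def forward(self, feats: torch.Tensor, text_embeds: torch.Tensor) -> torch.Tensor:
+        feats = self.norm(feats)                      # instead of F.normalize(feats, dim=1)
+        text_embeds = F.normalize(text_embeds, dim=-1)
+        sim = torch.einsum('bdhw,nd->bnhw', feats, text_embeds)  # (B, N, H, W)
+        return sim * self.logit_scale.exp() + self.bias
+```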
+ + + + diff --git a/models/YOLO-World/pyproject.toml b/models/YOLO-World/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..f56a1c239fc37ab97efa2cdc6f59fa5630ce7825 --- /dev/null +++ b/models/YOLO-World/pyproject.toml @@ -0,0 +1,56 @@ +[build-system] +requires = ["setuptools","wheel","torch"] +build-backend = "setuptools.build_meta" + +[project] +name = "yolo_world" +version = "0.1.0" +description = "YOLO-World: Real-time Open Vocabulary Object Detection" +readme = "README.md" +keywords = ["object detection"] +authors = [ + { name = "Tencent AILab", email = "ronnysong@tencent.com" }, +] +license = {text = "Apache License 2.0"} + +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +requires-python = ">= 3.7" + +dependencies = [ + "wheel", + "torch>=2.1.0", + "torchvision>=0.16.2", + "transformers", + "tokenizers", + "numpy", + "opencv-python", + "supervision==0.19.0", + "openmim", + "mmcv-lite>=2.0.0rc4", + "mmdet>=3.0.0", + "mmengine>=0.7.1", + "mmcv", + 'mmyolo @ git+https://github.com/onuralpszr/mmyolo.git', + +] + +[tool.setuptools] +package-dir = {"yolo_world" = "yolo_world"} +include-package-data = false +license-files = ["LICENSE"] +zip-safe = true + +[tool.setuptools.packages.find] +include = ["yolo_world*"] +exclude = ["docs*", "tests*","third_party*","assets*"] \ No newline at end of file diff --git a/models/YOLO-World/requirements/basic_requirements.txt b/models/YOLO-World/requirements/basic_requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d9c56e20f6a955bd905ee0c02d4a71447a7f086b --- /dev/null +++ b/models/YOLO-World/requirements/basic_requirements.txt @@ -0,0 +1,9 @@ +opencv-python==4.9.0.80 +opencv-python-headless==4.2.0.34 +mmcv==2.0.0 +mmdet==3.0.0 +mmengine==0.10.3 +mmyolo==0.6.0 +timm==0.6.13 +transformers==4.36.2 +albumentations \ No newline at end of file diff --git a/models/YOLO-World/requirements/demo_requirements.txt b/models/YOLO-World/requirements/demo_requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0268ad3ce823bd680e831a8c0b98a70edc2a1c20 --- /dev/null +++ b/models/YOLO-World/requirements/demo_requirements.txt @@ -0,0 +1,2 @@ +gradio==4.16.0 +supervision \ No newline at end of file diff --git a/models/YOLO-World/requirements/onnx_requirements.txt b/models/YOLO-World/requirements/onnx_requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a96fb18e6389de5eafcd0c73289049c51f649550 --- /dev/null +++ b/models/YOLO-World/requirements/onnx_requirements.txt @@ -0,0 +1,4 @@ +supervision +onnx +onnxruntime +onnxsim \ No newline at end of file diff --git a/models/YOLO-World/third_party/mmyolo/.circleci/config.yml b/models/YOLO-World/third_party/mmyolo/.circleci/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..59ba321aeec5dd3904c8df29e2833a41dbc676f7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.circleci/config.yml @@ -0,0 +1,34 @@ +version: 2.1 + +# this allows you to use CircleCI's dynamic configuration feature +setup: true + +# the path-filtering orb is required to continue a 
pipeline based on +# the path of an updated fileset +orbs: + path-filtering: circleci/path-filtering@0.1.2 + +workflows: + # the always-run workflow is always triggered, regardless of the pipeline parameters. + always-run: + jobs: + # the path-filtering/filter job determines which pipeline + # parameters to update. + - path-filtering/filter: + name: check-updated-files + # 3-column, whitespace-delimited mapping. One mapping per + # line: + # + mapping: | + mmyolo/.* lint_only false + requirements/.* lint_only false + tests/.* lint_only false + tools/.* lint_only false + configs/.* lint_only false + .circleci/.* lint_only false + base-revision: main + # this is the path of the configuration we should trigger once + # path filtering and pipeline parameter value updates are + # complete. In this case, we are using the parent dynamic + # configuration itself. + config-path: .circleci/test.yml diff --git a/models/YOLO-World/third_party/mmyolo/.circleci/docker/Dockerfile b/models/YOLO-World/third_party/mmyolo/.circleci/docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..d9cf8cc7712d5241975c3b748fb0d01a5545b4fd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.circleci/docker/Dockerfile @@ -0,0 +1,11 @@ +ARG PYTORCH="1.8.1" +ARG CUDA="10.2" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +# To fix GPG key error when running apt-get update +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx diff --git a/models/YOLO-World/third_party/mmyolo/.circleci/test.yml b/models/YOLO-World/third_party/mmyolo/.circleci/test.yml new file mode 100644 index 0000000000000000000000000000000000000000..149d6cac15ff9643a21535638a6cd5f961a17d4a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.circleci/test.yml @@ -0,0 +1,213 @@ +version: 2.1 + +# the default pipeline parameters, which will be updated according to +# the results of the path-filtering orb +parameters: + lint_only: + type: boolean + default: true + +jobs: + lint: + docker: + - image: cimg/python:3.7.4 + steps: + - checkout + - run: + name: Install pre-commit hook + command: | + pip install pre-commit + pre-commit install + - run: + name: Linting + command: pre-commit run --all-files + - run: + name: Check docstring coverage + command: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 90 mmyolo + build_cpu: + parameters: + # The python version must match available image tags in + # https://circleci.com/developer/images/image/cimg/python + python: + type: string + torch: + type: string + torchvision: + type: string + docker: + - image: cimg/python:<< parameters.python >> + resource_class: large + steps: + - checkout + - run: + name: Install Libraries + command: | + sudo apt-get update + sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 + - run: + name: Configure Python & pip + command: | + pip install --upgrade pip + pip install wheel + - run: + name: Install PyTorch + command: | + python -V + pip install torch==<< parameters.torch >>+cpu torchvision==<< 
parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html + - run: + name: Install ONNXRuntime + command: | + pip install onnxruntime==1.8.1 + wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz + tar xvf onnxruntime-linux-x64-1.8.1.tgz + - run: + name: Install mmyolo dependencies + command: | + pip install -U openmim + mim install git+https://github.com/open-mmlab/mmengine.git@main + mim install 'mmcv >= 2.0.0' + mim install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install -r requirements/albu.txt + pip install -r requirements/tests.txt + - run: + name: Install mmdeploy + command: | + pip install setuptools + git clone -b dev-1.x --depth 1 https://github.com/open-mmlab/mmdeploy.git mmdeploy --recurse-submodules + wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz + tar -xzvf cmake-3.20.0-linux-x86_64.tar.gz + sudo ln -sf $(pwd)/cmake-3.20.0-linux-x86_64/bin/* /usr/bin/ + cd mmdeploy && mkdir build && cd build && cmake .. -DMMDEPLOY_TARGET_BACKENDS=ort -DONNXRUNTIME_DIR=/home/circleci/project/onnxruntime-linux-x64-1.8.1 && make -j8 && make install + export LD_LIBRARY_PATH=/home/circleci/project/onnxruntime-linux-x64-1.8.1/lib:${LD_LIBRARY_PATH} + cd /home/circleci/project/mmdeploy && python -m pip install -v -e . + - run: + name: Build and install + command: | + pip install -e . + - run: + name: Run unittests + command: | + export LD_LIBRARY_PATH=/home/circleci/project/onnxruntime-linux-x64-1.8.1/lib:${LD_LIBRARY_PATH} + pytest tests/ +# coverage run --branch --source mmyolo -m pytest tests/ +# coverage xml +# coverage report -m + build_cuda: + parameters: + torch: + type: string + cuda: + type: enum + enum: ["10.1", "10.2", "11.0", "11.7"] + cudnn: + type: integer + default: 7 + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + # docker_layer_caching: true + resource_class: gpu.nvidia.small + steps: + - checkout + - run: + # Cloning repos in VM since Docker doesn't have access to the private key + name: Clone Repos + command: | + git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine + git clone -b dev-3.x --depth 1 https://github.com/open-mmlab/mmdetection.git /home/circleci/mmdetection + - run: + name: Build Docker image + command: | + docker build .circleci/docker -t mmyolo:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >> + docker run --gpus all -t -d -v /home/circleci/project:/mmyolo -v /home/circleci/mmengine:/mmengine -v /home/circleci/mmdetection:/mmdetection -w /mmyolo --name mmyolo mmyolo:gpu + - run: + name: Install mmyolo dependencies + command: | + docker exec mmyolo pip install -U openmim + docker exec mmyolo mim install -e /mmengine + docker exec mmyolo mim install 'mmcv >= 2.0.0' + docker exec mmyolo pip install -e /mmdetection + docker exec mmyolo pip install -r requirements/albu.txt + docker exec mmyolo pip install -r requirements/tests.txt + - run: + name: Build and install + command: | + docker exec mmyolo pip install -e . 
+ - run: + name: Run unittests + command: | + docker exec mmyolo pytest tests/ + +workflows: + pr_stage_lint: + when: << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - main + + pr_stage_test: + when: + not: << pipeline.parameters.lint_only >> + jobs: + - lint: + name: lint + filters: + branches: + ignore: + - main + - build_cpu: + name: minimum_version_cpu + torch: 1.8.0 + torchvision: 0.9.0 + python: 3.8.0 # The lowest python 3.7.x version available on CircleCI images + requires: + - lint + - build_cpu: + name: maximum_version_cpu + # mmdeploy not supported +# torch: 2.0.0 +# torchvision: 0.15.1 + torch: 1.12.1 + torchvision: 0.13.1 + python: 3.9.0 + requires: + - minimum_version_cpu + - hold: + type: approval + requires: + - maximum_version_cpu + - build_cuda: + name: mainstream_version_gpu + torch: 1.8.1 + # Use double quotation mark to explicitly specify its type + # as string instead of number + cuda: "10.2" + requires: + - hold + - build_cuda: + name: maximum_version_gpu + torch: 2.0.0 + cuda: "11.7" + cudnn: 8 + requires: + - hold + merge_stage_test: + when: + not: << pipeline.parameters.lint_only >> + jobs: + - build_cuda: + name: minimum_version_gpu + torch: 1.7.0 + # Use double quotation mark to explicitly specify its type + # as string instead of number + cuda: "11.0" + cudnn: 8 + filters: + branches: + only: + - main diff --git a/models/YOLO-World/third_party/mmyolo/.dev_scripts/gather_models.py b/models/YOLO-World/third_party/mmyolo/.dev_scripts/gather_models.py new file mode 100644 index 0000000000000000000000000000000000000000..f05e2b5b31329e12f1bd62196de6592fade0a7c8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.dev_scripts/gather_models.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import glob +import os +import os.path as osp +import shutil +import subprocess +import time +from collections import OrderedDict + +import torch +import yaml +from mmengine.config import Config +from mmengine.fileio import dump +from mmengine.utils import mkdir_or_exist, scandir + + +def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds): + + class OrderedDumper(Dumper): + pass + + def _dict_representer(dumper, data): + return dumper.represent_mapping( + yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()) + + OrderedDumper.add_representer(OrderedDict, _dict_representer) + return yaml.dump(data, stream, OrderedDumper, **kwds) + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + if 'message_hub' in checkpoint: + del checkpoint['message_hub'] + if 'ema_state_dict' in checkpoint: + del checkpoint['ema_state_dict'] + + for key in list(checkpoint['state_dict']): + if key.startswith('data_preprocessor'): + checkpoint['state_dict'].pop(key) + elif 'priors_base_sizes' in key: + checkpoint['state_dict'].pop(key) + elif 'grid_offset' in key: + checkpoint['state_dict'].pop(key) + elif 'prior_inds' in key: + checkpoint['state_dict'].pop(key) + + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. 
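+    # The stripped checkpoint is re-saved below (legacy, non-zipfile serialization
+    # on torch >= 1.6 so older loaders can still read it) and then renamed with
+    # the first 8 hex characters of its sha256 digest.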
+ if torch.__version__ >= '1.6': + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + f'-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + return final_file + + +def is_by_epoch(config): + cfg = Config.fromfile('./configs/' + config) + return cfg.train_cfg.type == 'EpochBasedTrainLoop' + + +def get_final_epoch_or_iter(config): + cfg = Config.fromfile('./configs/' + config) + if cfg.train_cfg.type == 'EpochBasedTrainLoop': + return cfg.train_cfg.max_epochs + else: + return cfg.train_cfg.max_iters + + +def get_best_epoch_or_iter(exp_dir): + best_epoch_iter_full_path = list( + sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1] + best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1] + best_epoch_or_iter = best_epoch_or_iter_model_path. \ + split('_')[-1].split('.')[0] + return best_epoch_or_iter_model_path, int(best_epoch_or_iter) + + +def get_real_epoch_or_iter(config): + cfg = Config.fromfile('./configs/' + config) + if cfg.train_cfg.type == 'EpochBasedTrainLoop': + epoch = cfg.train_cfg.max_epochs + return epoch + else: + return cfg.runner.max_iters + + +def get_final_results(log_json_path, + epoch_or_iter, + results_lut='coco/bbox_mAP', + by_epoch=True): + result_dict = dict() + with open(log_json_path) as f: + r = f.readlines()[-1] + last_metric = r.split(',')[0].split(': ')[-1].strip() + result_dict[results_lut] = last_metric + return result_dict + + +def get_dataset_name(config): + # If there are more dataset, add here. + name_map = dict( + CityscapesDataset='Cityscapes', + CocoDataset='COCO', + PoseCocoDataset='COCO Person', + YOLOv5CocoDataset='COCO', + CocoPanopticDataset='COCO', + YOLOv5DOTADataset='DOTA 1.0', + DeepFashionDataset='Deep Fashion', + LVISV05Dataset='LVIS v0.5', + LVISV1Dataset='LVIS v1', + VOCDataset='Pascal VOC', + YOLOv5VOCDataset='Pascal VOC', + WIDERFaceDataset='WIDER Face', + OpenImagesDataset='OpenImagesDataset', + OpenImagesChallengeDataset='OpenImagesChallengeDataset') + cfg = Config.fromfile('./configs/' + config) + return name_map[cfg.dataset_type] + + +def find_last_dir(model_dir): + dst_times = [] + for time_stamp in os.scandir(model_dir): + if osp.isdir(time_stamp): + dst_time = time.mktime( + time.strptime(time_stamp.name, '%Y%m%d_%H%M%S')) + dst_times.append([dst_time, time_stamp.name]) + return max(dst_times, key=lambda x: x[0])[1] + + +def convert_model_info_to_pwc(model_infos): + pwc_files = {} + for model in model_infos: + cfg_folder_name = osp.split(model['config'])[-2] + pwc_model_info = OrderedDict() + pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0] + pwc_model_info['In Collection'] = 'Please fill in Collection name' + pwc_model_info['Config'] = osp.join('configs', model['config']) + + # get metadata + meta_data = OrderedDict() + if 'epochs' in model: + meta_data['Epochs'] = get_real_epoch_or_iter(model['config']) + else: + meta_data['Iterations'] = get_real_epoch_or_iter(model['config']) + pwc_model_info['Metadata'] = meta_data + + # get dataset name + dataset_name = get_dataset_name(model['config']) + + # get results + results = [] + # if there are more metrics, add here. 
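+        # each metric present in the results dict becomes its own entry:
+        # bbox_mAP -> Object Detection, segm_mAP -> Instance Segmentation, PQ -> Panoptic Segmentation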
+ if 'bbox_mAP' in model['results']: + metric = round(model['results']['bbox_mAP'] * 100, 1) + results.append( + OrderedDict( + Task='Object Detection', + Dataset=dataset_name, + Metrics={'box AP': metric})) + if 'segm_mAP' in model['results']: + metric = round(model['results']['segm_mAP'] * 100, 1) + results.append( + OrderedDict( + Task='Instance Segmentation', + Dataset=dataset_name, + Metrics={'mask AP': metric})) + if 'PQ' in model['results']: + metric = round(model['results']['PQ'], 1) + results.append( + OrderedDict( + Task='Panoptic Segmentation', + Dataset=dataset_name, + Metrics={'PQ': metric})) + pwc_model_info['Results'] = results + + link_string = 'https://download.openmmlab.com/mmyolo/v0/' + link_string += '{}/{}'.format(model['config'].rstrip('.py'), + osp.split(model['model_path'])[-1]) + pwc_model_info['Weights'] = link_string + if cfg_folder_name in pwc_files: + pwc_files[cfg_folder_name].append(pwc_model_info) + else: + pwc_files[cfg_folder_name] = [pwc_model_info] + return pwc_files + + +def parse_args(): + parser = argparse.ArgumentParser(description='Gather benchmarked models') + parser.add_argument( + 'root', + type=str, + help='root path of benchmarked models to be gathered') + parser.add_argument( + 'out', type=str, help='output path of gathered models to be stored') + parser.add_argument( + '--best', + action='store_true', + help='whether to gather the best model.') + + args = parser.parse_args() + return args + + +# TODO: Refine +def main(): + args = parse_args() + models_root = args.root + models_out = args.out + mkdir_or_exist(models_out) + + # find all models in the root directory to be gathered + raw_configs = list(scandir('./configs', '.py', recursive=True)) + + # filter configs that is not trained in the experiments dir + used_configs = [] + for raw_config in raw_configs: + if osp.exists(osp.join(models_root, raw_config)): + used_configs.append(raw_config) + print(f'Find {len(used_configs)} models to be gathered') + + # find final_ckpt and log file for trained each config + # and parse the best performance + model_infos = [] + for used_config in used_configs: + exp_dir = osp.join(models_root, used_config) + by_epoch = is_by_epoch(used_config) + # check whether the exps is finished + if args.best is True: + final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir) + else: + final_epoch_or_iter = get_final_epoch_or_iter(used_config) + final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter', + final_epoch_or_iter) + + model_path = osp.join(exp_dir, final_model) + # skip if the model is still training + if not osp.exists(model_path): + continue + + # get the latest logs + latest_exp_name = find_last_dir(exp_dir) + latest_exp_json = osp.join(exp_dir, latest_exp_name, 'vis_data', + latest_exp_name + '.json') + + model_performance = get_final_results( + latest_exp_json, final_epoch_or_iter, by_epoch=by_epoch) + + if model_performance is None: + continue + + model_info = dict( + config=used_config, + results=model_performance, + final_model=final_model, + latest_exp_json=latest_exp_json, + latest_exp_name=latest_exp_name) + model_info['epochs' if by_epoch else 'iterations'] = \ + final_epoch_or_iter + model_infos.append(model_info) + + # publish model for each checkpoint + publish_model_infos = [] + for model in model_infos: + model_publish_dir = osp.join(models_out, model['config'].rstrip('.py')) + mkdir_or_exist(model_publish_dir) + + model_name = osp.split(model['config'])[-1].split('.')[0] + + model_name += '_' + model['latest_exp_name'] + 
publish_model_path = osp.join(model_publish_dir, model_name) + trained_model_path = osp.join(models_root, model['config'], + model['final_model']) + + # convert model + final_model_path = process_checkpoint(trained_model_path, + publish_model_path) + + # copy log + shutil.copy(model['latest_exp_json'], + osp.join(model_publish_dir, f'{model_name}.log.json')) + + # copy config to guarantee reproducibility + config_path = model['config'] + config_path = osp.join( + 'configs', + config_path) if 'configs' not in config_path else config_path + target_config_path = osp.split(config_path)[-1] + shutil.copy(config_path, osp.join(model_publish_dir, + target_config_path)) + + model['model_path'] = final_model_path + publish_model_infos.append(model) + + models = dict(models=publish_model_infos) + print(f'Totally gathered {len(publish_model_infos)} models') + dump(models, osp.join(models_out, 'model_info.json')) + + pwc_files = convert_model_info_to_pwc(publish_model_infos) + for name in pwc_files: + with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f: + ordered_yaml_dump(pwc_files[name], f, encoding='utf-8') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/.dev_scripts/print_registers.py b/models/YOLO-World/third_party/mmyolo/.dev_scripts/print_registers.py new file mode 100644 index 0000000000000000000000000000000000000000..52646da205969db62d3d59dc2736be00954510e2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.dev_scripts/print_registers.py @@ -0,0 +1,448 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import importlib +import os +import os.path as osp +import pkgutil +import sys +import tempfile +from multiprocessing import Pool +from pathlib import Path + +import numpy as np +import pandas as pd + +# host_addr = 'https://gitee.com/open-mmlab' +host_addr = 'https://github.com/open-mmlab' +tools_list = ['tools', '.dev_scripts'] +proxy_names = { + 'mmdet': 'mmdetection', + 'mmseg': 'mmsegmentation', + 'mmcls': 'mmclassification' +} +merge_module_keys = {'mmcv': ['mmengine']} +# exclude_prefix = {'mmcv': ['{_k}') + table_data.append((registry_name, registry_strings)) + + # sort the data list + table_data = sorted(table_data, key=lambda x: len(x[1])) + # split multi parts + table_data_multi_parts = [] + for (registry_name, registry_strings) in table_data: + multi_parts = False + if len(registry_strings) > max_size_per_cell: + multi_parts = True + for cell_idx, registry_cell in enumerate( + divide_list_into_groups(registry_strings, max_size_per_cell)): + registry_str = ''.join(registry_cell.tolist()) + registry_str = f'
    {registry_str}
' + table_data_multi_parts.append([ + registry_name if not multi_parts else + f'{registry_name} (part {cell_idx + 1})', registry_str + ]) + + for table_data in divide_list_into_groups(table_data_multi_parts, + max_col_per_row): + table_data = list(zip(*table_data.tolist())) + html += dataframe_to_html( + pd.DataFrame([table_data[1]], columns=table_data[0])) + if html: + html = f'
{title}
\n{html}' + html = f'
{html}
\n' + return html + + +def tools_to_html(tools_dict, repo_name=''): + + def _recurse(_dict, _connector, _result): + assert isinstance(_dict, dict), \ + f'unknown recurse type: {_dict} ({type(_dict)})' + for _k, _v in _dict.items(): + if _v is None: + if _connector not in _result: + _result[_connector] = [] + _result[_connector].append(_k) + else: + _recurse(_v, osp.join(_connector, _k), _result) + + table_data = {} + title = f'{capitalize(repo_name)} Tools' + _recurse(tools_dict, '', table_data) + return registries_to_html(table_data, title) + + +def dataframe_to_html(dataframe): + styler = dataframe.style + styler = styler.hide(axis='index') + styler = styler.format(na_rep='-') + styler = styler.set_properties(**{ + 'text-align': 'left', + 'align': 'center', + 'vertical-align': 'top' + }) + styler = styler.set_table_styles([{ + 'selector': + 'thead th', + 'props': + 'align:center;text-align:center;vertical-align:bottom' + }]) + html = styler.to_html() + html = f'
\n{html}
' + return html + + +def generate_markdown_by_repository(repo_name, + module_name, + branch, + pulldir, + throw_error=False): + # add the pull dir to the system path so that it can be found + if pulldir not in sys.path: + sys.path.insert(0, pulldir) + module_list, error_dict = load_modules_from_dir( + module_name, pulldir, throw_error=throw_error) + registries_tree = get_registries_from_modules(module_list) + if error_dict: + error_dict_name = 'error_modules' + assert (error_dict_name not in registries_tree), \ + f'duplicate module name was found: {error_dict_name}' + registries_tree.update({error_dict_name: error_dict}) + # get the tools files + for tools_name in tools_list: + assert (tools_name not in registries_tree), \ + f'duplicate tools name was found: {tools_name}' + tools_tree = osp.join(pulldir, tools_name) + tools_tree = get_scripts_from_dir(tools_tree) + registries_tree.update({tools_name: tools_tree}) + # print_tree(registries_tree) + # get registries markdown string + module_registries = registries_tree.get(module_name, {}) + for merge_key in merge_module_keys.get(module_name, []): + merge_dict = registries_tree.get(merge_key, {}) + merge_registries(module_registries, merge_dict) + for exclude_key in exclude_prefix.get(module_name, []): + exclude_registries(module_registries, exclude_key) + markdown_str = registries_to_html( + module_registries, title=f'{capitalize(repo_name)} Module Components') + # get tools markdown string + tools_registries = {} + for tools_name in tools_list: + tools_registries.update( + {tools_name: registries_tree.get(tools_name, {})}) + markdown_str += tools_to_html(tools_registries, repo_name=repo_name) + version_str = get_version_from_module_name(module_name, branch) + title_str = f'\n\n## {capitalize(repo_name)}{version_str}\n' + # remove the pull dir from system path + if pulldir in sys.path: + sys.path.remove(pulldir) + return f'{title_str}{markdown_str}' + + +def parse_args(): + parser = argparse.ArgumentParser( + description='print registries in openmmlab repositories') + parser.add_argument( + '-r', + '--repositories', + nargs='+', + default=['mmdet', 'mmcls', 'mmseg', 'mmengine', 'mmcv'], + type=str, + help='git repositories name in OpenMMLab') + parser.add_argument( + '-b', + '--branches', + nargs='+', + default=['3.x', '1.x', '1.x', 'main', '2.x'], + type=str, + help='the branch names of git repositories, the length of branches ' + 'must be same as the length of repositories') + parser.add_argument( + '-o', '--out', type=str, default='.', help='output path of the file') + parser.add_argument( + '--throw-error', + action='store_true', + default=False, + help='whether to throw error when trying to import modules') + args = parser.parse_args() + return args + + +# TODO: Refine +def main(): + args = parse_args() + repositories = args.repositories + branches = args.branches + assert isinstance(repositories, list), \ + 'Type of repositories must be list' + if branches is None: + branches = [None] * len(repositories) + assert isinstance(branches, list) and \ + len(branches) == len(repositories), \ + 'The length of branches must be same as ' \ + 'that of repositories' + assert isinstance(args.out, str), \ + 'The type of output path must be string' + # save path of file + mkdir_or_exist(args.out) + save_path = osp.join(args.out, 'registries_info.md') + with tempfile.TemporaryDirectory() as tmpdir: + # multi process init + pool = Pool(processes=len(repositories)) + multi_proc_input_list = [] + multi_proc_output_list = [] + # get the git repositories + 
for branch, repository in zip(branches, repositories): + repo_name, module_name = parse_repo_name(repository) + pulldir = osp.join(tmpdir, f'tmp_{repo_name}') + git_pull_branch( + repo_name=repo_name, branch_name=branch, pulldir=pulldir) + multi_proc_input_list.append( + (repo_name, module_name, branch, pulldir, args.throw_error)) + print('starting the multi process to get the registries') + for multi_proc_input in multi_proc_input_list: + multi_proc_output_list.append( + pool.apply_async(generate_markdown_by_repository, + multi_proc_input)) + pool.close() + pool.join() + with open(save_path, 'w', encoding='utf-8') as fw: + fw.write(f'{markdown_title}\n') + for multi_proc_output in multi_proc_output_list: + markdown_str = multi_proc_output.get() + fw.write(f'{markdown_str}\n') + print(f'saved registries to the path: {save_path}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/.github/CODE_OF_CONDUCT.md b/models/YOLO-World/third_party/mmyolo/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..92afad1c5ab5d5781115dee45c131d3751d3cd31 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at chenkaidev@gmail.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq + +[homepage]: https://www.contributor-covenant.org diff --git a/models/YOLO-World/third_party/mmyolo/.github/CONTRIBUTING.md b/models/YOLO-World/third_party/mmyolo/.github/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..4ac764f10587497cb6da5ba453c08056d5bc9df7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.github/CONTRIBUTING.md @@ -0,0 +1 @@ +We appreciate all contributions to improve MMYOLO. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline. diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/1-bug-report.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/1-bug-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..0cec5853ebbde572c2c6322f9d7123cac5a97df7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/1-bug-report.yml @@ -0,0 +1,67 @@ +name: "🐞 Bug report" +description: "Create a report to help us reproduce and fix the bug" + + +body: + - type: markdown + attributes: + value: | + Thank you for reporting this issue to help us improve! + If you have already identified the reason, we strongly appreciate you creating a new PR to fix it [here](https://github.com/open-mmlab/mmyolo/pulls)! + If this issue is about installing MMCV, please file an issue at [MMCV](https://github.com/open-mmlab/mmcv/issues/new/choose). + If you need our help, please fill in as much of the following form as you're able. + + - type: checkboxes + attributes: + label: Prerequisite + description: Please check the following items before creating a new issue. + options: + - label: I have searched [the existing and past issues](https://github.com/open-mmlab/mmyolo/issues) but cannot get the expected help. + required: true + - label: I have read the [FAQ documentation](https://mmyolo.readthedocs.io/en/latest/faq.html) but cannot get the expected help. + required: true + - label: The bug has not been fixed in the [latest version](https://github.com/open-mmlab/mmyolo). + required: true + + - type: textarea + attributes: + label: 🐞 Describe the bug + description: | + Please provide a clear and concise description of what the bug is. + Preferably a simple and minimal code snippet that we can reproduce the error by running the code. + placeholder: | + A clear and concise description of what the bug is. 
+
+        ```python
+        # Sample code to reproduce the problem
+        ```
+
+        ```shell
+        The command or script you run.
+        ```
+
+        ```
+        The error message or logs you got, with the full traceback.
+        ```
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Environment
+      description: |
+        Please run `python mmyolo/utils/collect_env.py` to collect necessary environment information and paste it here.
+        You may add additional information that may be helpful for locating the problem, such as
+          - How you installed PyTorch \[e.g., pip, conda, source\]
+          - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Additional information
+      description: Tell us anything else you think we should know.
+      placeholder: |
+        1. Did you make any modifications to the code or config? Do you understand what you have modified?
+        2. What dataset did you use?
+        3. What do you think might be the reason?
diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/2-feature-request.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/2-feature-request.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8b24846777e89685bcb99c5d79663839536b6607
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/2-feature-request.yml
@@ -0,0 +1,32 @@
+name: 🚀 Feature request
+description: Suggest an idea for this project
+labels: [feature request]
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thank you for suggesting an idea to make MMYOLO better.
+        We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmyolo/pulls)!
+
+        If you need our help, please fill in as much of the following form as you're able.
+
+  - type: textarea
+    attributes:
+      label: What is the problem this feature will solve?
+      placeholder: |
+        E.g., It is inconvenient when \[....\].
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: What is the feature you are proposing to solve the problem?
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: What alternatives have you considered?
+      description: |
+        Add any other context or screenshots about the feature request here.
diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/3-new-model.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/3-new-model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2aacff4abc353c1e999c8e5952c86ffcac38b063
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/3-new-model.yml
@@ -0,0 +1,30 @@
+name: "\U0001F31F New model/dataset addition"
+description: Submit a proposal/request to implement a new model / dataset
+labels: [ "New model/dataset" ]
+
+body:
+  - type: textarea
+    id: description-request
+    validations:
+      required: true
+    attributes:
+      label: Model/Dataset description
+      description: |
+        Put any and all important information relevant to the model/dataset.
+
+  - type: checkboxes
+    attributes:
+      label: Open source status
+      description: |
+        Please provide the open-source status, which would be very helpful.
+      options:
+        - label: "The model implementation is available"
+        - label: "The model weights are available."
+
+  - type: textarea
+    id: additional-info
+    attributes:
+      label: Provide useful links for the implementation
+      description: |
+        Please provide information regarding the implementation, the weights, and the authors.
+        Please mention the authors by @gh-username if you're aware of their usernames.
diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/4-documentation.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/4-documentation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dbf1ef8107a33c41067743097ba78e047be43cdb
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/4-documentation.yml
@@ -0,0 +1,22 @@
+name: 📚 Documentation
+description: Report an issue related to https://mmyolo.readthedocs.io/en/latest/.
+
+body:
+- type: textarea
+  attributes:
+    label: 📚 The doc issue
+    description: >
+      A clear and concise description of what content in https://mmyolo.readthedocs.io/en/latest/ is an issue.
+  validations:
+    required: true
+
+- type: textarea
+  attributes:
+    label: Suggest a potential alternative/fix
+    description: >
+      Tell us how we could improve the documentation in this regard.
+
+- type: markdown
+  attributes:
+    value: >
+      Thanks for contributing 🎉!
diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/5-reimplementation.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/5-reimplementation.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1240aa896a50151ad47cc1bf0813d0b40d7e7169
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/5-reimplementation.yml
@@ -0,0 +1,87 @@
+name: "💥 Reimplementation Questions"
+description: "Ask questions about model reimplementation"
+
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        If you have already identified the reason, we strongly appreciate you creating a new PR to fix it [here](https://github.com/open-mmlab/mmyolo/pulls)!
+
+  - type: checkboxes
+    attributes:
+      label: Prerequisite
+      description: Please check the following items before creating a new issue.
+      options:
+        - label: I have searched [the existing and past issues](https://github.com/open-mmlab/mmyolo/issues) but cannot get the expected help.
+          required: true
+        - label: I have read the [FAQ documentation](https://mmyolo.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
+          required: true
+        - label: The bug has not been fixed in the [latest version](https://github.com/open-mmlab/mmyolo).
+          required: true
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: 💬 Describe the reimplementation questions
+      description: |
+        A clear and concise description of the problem you met and what you have done.
+        There are several common situations in reimplementation issues, as listed below:
+
+        1. Reimplement a model in the model zoo using the provided configs
+        2. Reimplement a model in the model zoo on another dataset (e.g., custom datasets)
+        3. Reimplement a custom model but all the components are implemented in MMDetection
+        4. Reimplement a custom model with new modules implemented by yourself
+
+        There are several things to do for the different cases, as below.
+
+        - For case 1 & 3, please follow the steps in the following sections so that we can quickly identify the issue.
+        - For case 2 & 4, please understand that we are not able to offer much help here because we usually do not know the full code and the users should be responsible for the code they write.
+        - One suggestion for case 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtained in the issue, follow the steps in the following sections, and be as clear as possible so that we can better help you.
+      placeholder: |
+        A clear and concise description of what the bug is.
+        What config dir did you run?
+
+        ```none
+        A placeholder for the config.
+        ```
+
+        ```shell
+        The command or script you run.
+        ```
+
+        ```
+        The error message or logs you got, with the full traceback.
+        ```
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Environment
+      description: |
+        Please run `python mmyolo/utils/collect_env.py` to collect necessary environment information and paste it here.
+        You may add additional information that may be helpful for locating the problem, such as
+          - How you installed PyTorch \[e.g., pip, conda, source\]
+          - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Expected results
+      description: If applicable, paste the related results here, e.g., what you expect and what you get.
+      placeholder: |
+        ```none
+        A placeholder for results comparison
+        ```
+
+  - type: textarea
+    attributes:
+      label: Additional information
+      description: Tell us anything else you think we should know.
+      placeholder: |
+        1. Did you make any modifications to the code or config? Do you understand what you have modified?
+        2. What dataset did you use?
+        3. What do you think might be the reason?
diff --git a/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/config.yml b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..585c786b50b3692e996a1d150470852e876a24dc
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,9 @@
+blank_issues_enabled: true
+
+contact_links:
+  - name: 💬 Forum
+    url: https://github.com/open-mmlab/mmyolo/discussions
+    about: Ask general usage questions and discuss with other MMYOLO community members
+  - name: 🌐 Explore OpenMMLab
+    url: https://openmmlab.com/
+    about: Get to know more about OpenMMLab
diff --git a/models/YOLO-World/third_party/mmyolo/.github/pull_request_template.md b/models/YOLO-World/third_party/mmyolo/.github/pull_request_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..2997d883eec5e36302b7a4505f2d218f5cdf7c91
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/.github/pull_request_template.md
@@ -0,0 +1,25 @@
+Thanks for your contribution; we appreciate it a lot. The following instructions will make your pull request healthier and help it get feedback more easily. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.
+
+## Motivation
+
+Please describe the motivation for this PR and the goal you want to achieve through it.
+
+## Modification
+
+Please briefly describe the modifications made in this PR.
+
+## BC-breaking (Optional)
+
+Does the modification introduce changes that break the backward compatibility of the downstream repos?
+If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR. + +## Use cases (Optional) + +If this PR introduces a new feature, it is better to list some use cases here and update the documentation. + +## Checklist + +1. Pre-commit or other linting tools are used to fix potential lint issues. +2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure the correctness. +3. If the modification has a potential influence on downstream projects, this PR should be tested with downstream projects, like MMDetection or MMClassification. +4. The documentation has been modified accordingly, like docstring or example tutorials. diff --git a/models/YOLO-World/third_party/mmyolo/.github/workflows/deploy.yml b/models/YOLO-World/third_party/mmyolo/.github/workflows/deploy.yml new file mode 100644 index 0000000000000000000000000000000000000000..08f542bbaaae1a1f0f33712544e1ff08c7aa2e85 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.github/workflows/deploy.yml @@ -0,0 +1,28 @@ +name: deploy + +on: push + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install torch + run: pip install torch + - name: Install wheel + run: pip install wheel + - name: Build MMYOLO + run: python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + run: | + pip install twine + twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }} diff --git a/models/YOLO-World/third_party/mmyolo/.pre-commit-config-zh-cn.yaml b/models/YOLO-World/third_party/mmyolo/.pre-commit-config-zh-cn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52bb607e86cedc4f0ac9d188bb7ec717d88b35fb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.pre-commit-config-zh-cn.yaml @@ -0,0 +1,60 @@ +exclude: ^tests/data/ +repos: + - repo: https://gitee.com/openmmlab/mirrors-flake8 + rev: 5.0.4 + hooks: + - id: flake8 + - repo: https://gitee.com/openmmlab/mirrors-isort + rev: 5.11.5 + hooks: + - id: isort + - repo: https://gitee.com/openmmlab/mirrors-yapf + rev: v0.32.0 + hooks: + - id: yapf + - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://gitee.com/openmmlab/mirrors-mdformat + rev: 0.7.9 + hooks: + - id: mdformat + args: ["--number"] + additional_dependencies: + - mdformat-openmmlab + - mdformat_frontmatter + - linkify-it-py + - repo: https://gitee.com/openmmlab/mirrors-codespell + rev: v2.2.1 + hooks: + - id: codespell + - repo: https://gitee.com/openmmlab/mirrors-docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: https://gitee.com/openmmlab/mirrors-pyupgrade + rev: v3.0.0 + hooks: + - id: pyupgrade + args: ["--py36-plus"] + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 + hooks: + - id: check-copyright + args: ["mmyolo", "tests"] +# - repo: https://gitee.com/openmmlab/mirrors-mypy +# rev: v0.812 +# 
hooks: +# - id: mypy +# exclude: "docs" diff --git a/models/YOLO-World/third_party/mmyolo/.pre-commit-config.yaml b/models/YOLO-World/third_party/mmyolo/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffae20d2d3941607fd541e03e22c0e351f296d88 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.pre-commit-config.yaml @@ -0,0 +1,60 @@ +exclude: ^tests/data/ +repos: + - repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + - repo: https://github.com/PyCQA/isort + rev: 5.11.5 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.32.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.9 + hooks: + - id: mdformat + args: ["--number"] + additional_dependencies: + - mdformat-openmmlab + - mdformat_frontmatter + - linkify-it-py + - repo: https://github.com/codespell-project/codespell + rev: v2.2.1 + hooks: + - id: codespell + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + - repo: https://github.com/asottile/pyupgrade + rev: v3.0.0 + hooks: + - id: pyupgrade + args: ["--py36-plus"] + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 + hooks: + - id: check-copyright + args: ["mmyolo", "tests"] +# - repo: https://github.com/pre-commit/mirrors-mypy +# rev: v0.812 +# hooks: +# - id: mypy +# exclude: "docs" diff --git a/models/YOLO-World/third_party/mmyolo/.readthedocs.yml b/models/YOLO-World/third_party/mmyolo/.readthedocs.yml new file mode 100644 index 0000000000000000000000000000000000000000..c9ab01ce18caeebce129472bd63b0465405d6a50 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/.readthedocs.yml @@ -0,0 +1,8 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt diff --git a/models/YOLO-World/third_party/mmyolo/LICENSE b/models/YOLO-World/third_party/mmyolo/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/models/YOLO-World/third_party/mmyolo/MANIFEST.in b/models/YOLO-World/third_party/mmyolo/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..5bf1d9ebabcc5ca1f28207b62eab10141474db51 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/MANIFEST.in @@ -0,0 +1,6 @@ +include requirements/*.txt +include mmyolo/VERSION +include mmyolo/.mim/model-index.yml +include mmyolo/.mim/demo/*/* +recursive-include mmyolo/.mim/configs *.py *.yml +recursive-include mmyolo/.mim/tools *.sh *.py diff --git a/models/YOLO-World/third_party/mmyolo/README.md b/models/YOLO-World/third_party/mmyolo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b799a759c367938cbeea728b0763a36cda5b2544 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/README.md @@ -0,0 +1,428 @@ +
+
+[OpenMMLab website](https://openmmlab.com/) (**HOT**)      [OpenMMLab platform](https://platform.openmmlab.com/) (**TRY IT OUT**)
+
+ +[![PyPI](https://img.shields.io/pypi/v/mmyolo)](https://pypi.org/project/mmyolo) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmyolo.readthedocs.io/en/latest/) +[![deploy](https://github.com/open-mmlab/mmyolo/workflows/deploy/badge.svg)](https://github.com/open-mmlab/mmyolo/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmyolo/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmyolo) +[![license](https://img.shields.io/github/license/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/blob/main/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues) + +[📘Documentation](https://mmyolo.readthedocs.io/en/latest/) | +[🛠️Installation](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html) | +[👀Model Zoo](https://mmyolo.readthedocs.io/en/latest/model_zoo.html) | +[🆕Update News](https://mmyolo.readthedocs.io/en/latest/notes/changelog.html) | +[🤔Reporting Issues](https://github.com/open-mmlab/mmyolo/issues/new/choose) + +
+ +
+ +English | [简体中文](README_zh-CN.md) + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +## 📄 Table of Contents + +- [🥳 🚀 What's New](#--whats-new-) + - [✨ Highlight](#-highlight-) +- [📖 Introduction](#-introduction-) +- [🛠️ Installation](#%EF%B8%8F-installation-) +- [👨‍🏫 Tutorial](#-tutorial-) +- [📊 Overview of Benchmark and Model Zoo](#-overview-of-benchmark-and-model-zoo-) +- [❓ FAQ](#-faq-) +- [🙌 Contributing](#-contributing-) +- [🤝 Acknowledgement](#-acknowledgement-) +- [🖊️ Citation](#️-citation-) +- [🎫 License](#-license-) +- [🏗️ Projects in OpenMMLab](#%EF%B8%8F-projects-in-openmmlab-) + +## 🥳 🚀 What's New [🔝](#-table-of-contents) + +💎 **v0.6.0** was released on 15/8/2023: + +- Support YOLOv5 instance segmentation +- Support YOLOX-Pose based on MMPose +- Add 15 minutes instance segmentation tutorial. +- YOLOv5 supports using mask annotation to optimize bbox +- Add Multi-scale training and testing docs + +For release history and update details, please refer to [changelog](https://mmyolo.readthedocs.io/en/latest/notes/changelog.html). + +### ✨ Highlight [🔝](#-table-of-contents) + +We are excited to announce our latest work on real-time object recognition tasks, **RTMDet**, a family of fully convolutional single-stage detectors. RTMDet not only achieves the best parameter-accuracy trade-off on object detection from tiny to extra-large model sizes but also obtains new state-of-the-art performance on instance segmentation and rotated object detection tasks. Details can be found in the [technical report](https://arxiv.org/abs/2212.07784). Pre-trained models are [here](configs/rtmdet). + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real) + +| Task | Dataset | AP | FPS(TRT FP16 BS1 3090) | +| ------------------------ | ------- | ------------------------------------ | ---------------------- | +| Object Detection | COCO | 52.8 | 322 | +| Instance Segmentation | COCO | 44.6 | 188 | +| Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 | + +
+ +
+
+
+MMYOLO currently implements object detection and rotated object detection algorithms, and it brings a significant training speedup compared to the corresponding MMDetection implementations: training is about 2.6 times faster than the previous version.
+
+## 📖 Introduction [🔝](#-table-of-contents)
+
+MMYOLO is an open source toolbox for YOLO series algorithms based on PyTorch and [MMDetection](https://github.com/open-mmlab/mmdetection). It is a part of the [OpenMMLab](https://openmmlab.com/) project.
+
+The master branch works with **PyTorch 1.6+**.
+
+
+
+Major features
+
+- 🕹️ **Unified and convenient benchmark**
+
+  MMYOLO unifies the implementation of modules in various YOLO algorithms and provides a unified benchmark. Users can compare and analyze them in a fair and convenient way.
+
+- 📚 **Rich and detailed documentation**
+
+  MMYOLO provides rich documentation for getting started, model deployment, advanced usages, and algorithm analysis, making it easy for users at different levels to get started and make extensions quickly.
+
+- 🧩 **Modular Design**
+
+  MMYOLO decomposes the framework into different components where users can easily customize a model by combining different modules with various training and testing strategies, as sketched in the config example below.
+
+BaseModule-P5
+  The figure above was contributed by RangeKing@GitHub, thank you very much!
+
+The figure of the P6 model is in [model_design.md](docs/en/recommended_topics/model_design.md).
+
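+
+As a concrete illustration of the modular design, the config sketch below shows how an experiment is typically assembled by inheriting a base config and overriding only selected modules; the overridden values (class count, batch size) are illustrative assumptions, not settings used by this repository:
+
+```python
+# custom_yolov5_s.py -- hypothetical user config, shown only as a sketch
+_base_ = './yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py'  # inherit a full training recipe
+
+# Override only the pieces that should change; all other keys fall back to the base config.
+model = dict(
+    bbox_head=dict(head_module=dict(num_classes=3)))  # e.g. a 3-class custom dataset
+
+train_dataloader = dict(batch_size=8)  # e.g. a smaller batch size for a single GPU
+```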
+
+## 🛠️ Installation [🔝](#-table-of-contents)
+
+MMYOLO relies on PyTorch, MMCV, MMEngine, and MMDetection. Below are quick steps for installation. Please refer to the [Install Guide](docs/en/get_started/installation.md) for more detailed instructions.
+
+```shell
+conda create -n mmyolo python=3.8 pytorch==1.10.1 torchvision==0.11.2 cudatoolkit=11.3 -c pytorch -y
+conda activate mmyolo
+pip install openmim
+mim install "mmengine>=0.6.0"
+mim install "mmcv>=2.0.0rc4,<2.1.0"
+mim install "mmdet>=3.0.0,<4.0.0"
+git clone https://github.com/open-mmlab/mmyolo.git
+cd mmyolo
+# Install albumentations
+pip install -r requirements/albu.txt
+# Install MMYOLO
+mim install -v -e .
+```
+
+## 👨‍🏫 Tutorial [🔝](#-table-of-contents)
+
+MMYOLO is based on MMDetection and adopts the same code structure and design approach. To get the most out of it, please first read the [MMDetection Overview](https://mmdetection.readthedocs.io/en/latest/get_started.html) for an initial understanding of MMDetection.
+
+The usage of MMYOLO is almost identical to MMDetection, and all of its tutorials are straightforward to follow; you can also learn from the [MMDetection User Guide and Advanced Guide](https://mmdetection.readthedocs.io/en/3.x/).
+
+For the parts that differ from MMDetection, we have also prepared dedicated user guides and advanced guides; please read our [documentation](https://mmyolo.readthedocs.io/en/latest/).
+
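+
+Once installed, models can also be used directly from Python. The snippet below is a minimal sketch that relies on the MMDetection 3.x inference API together with MMYOLO's module registration; the checkpoint path is a placeholder:
+
+```python
+from mmdet.apis import init_detector, inference_detector
+from mmyolo.utils import register_all_modules
+
+register_all_modules()  # register MMYOLO components so the config can be resolved
+
+config_file = 'configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py'
+checkpoint_file = 'path/to/your_checkpoint.pth'  # placeholder checkpoint
+
+model = init_detector(config_file, checkpoint_file, device='cuda:0')
+result = inference_detector(model, 'demo/demo.jpg')  # returns a DetDataSample
+print(result.pred_instances.bboxes.shape)  # predicted boxes for the image
+```
+
+For command-line training and testing, the standard `tools/train.py` and `tools/test.py` entry points take the same config files.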
+Get Started + +- [Overview](docs/en/get_started/overview.md) +- [Dependencies](docs/en/get_started/dependencies.md) +- [Installation](docs/en/get_started/installation.md) +- [15 minutes object detection](docs/en/get_started/15_minutes_object_detection.md) +- [15 minutes rotated object detection](docs/en/get_started/15_minutes_rotated_object_detection.md) +- [15 minutes instance segmentation](docs/en/get_started/15_minutes_instance_segmentation.md) +- [Resources summary](docs/en/get_started/article.md) + +
+ +
+Recommended Topics + +- [How to contribute code to MMYOLO](docs/en/recommended_topics/contributing.md) +- [Training testing tricks](docs/en/recommended_topics/training_testing_tricks.md) +- [MMYOLO model design](docs/en/recommended_topics/model_design.md) +- [Algorithm principles and implementation](docs/en/recommended_topics/algorithm_descriptions/) +- [Replace the backbone network](docs/en/recommended_topics/replace_backbone.md) +- [MMYOLO model complexity analysis](docs/en/recommended_topics/complexity_analysis.md) +- [Annotation-to-deployment workflow for custom dataset](docs/en/recommended_topics/labeling_to_deployment_tutorials.md) +- [Visualization](docs/en/recommended_topics/visualization.md) +- [Model deployment](docs/en/recommended_topics/deploy/) +- [Troubleshooting steps](docs/en/recommended_topics/troubleshooting_steps.md) +- [MMYOLO application examples](docs/en/recommended_topics/application_examples/) +- [MM series repo essential basics](docs/en/recommended_topics/mm_basics.md) +- [Dataset preparation and description](docs/en/recommended_topics/dataset_preparation.md) + +
+ +
+Common Usage + +- [Resume training](docs/en/common_usage/resume_training.md) +- [Enabling and disabling SyncBatchNorm](docs/en/common_usage/syncbn.md) +- [Enabling AMP](docs/en/common_usage/amp_training.md) +- [Multi-scale training and testing](docs/en/common_usage/ms_training_testing.md) +- [TTA Related Notes](docs/en/common_usage/tta.md) +- [Add plugins to the backbone network](docs/en/common_usage/plugins.md) +- [Freeze layers](docs/en/common_usage/freeze_layers.md) +- [Output model predictions](docs/en/common_usage/output_predictions.md) +- [Set random seed](docs/en/common_usage/set_random_seed.md) +- [Module combination](docs/en/common_usage/module_combination.md) +- [Cross-library calls using mim](docs/en/common_usage/mim_usage.md) +- [Apply multiple Necks](docs/en/common_usage/multi_necks.md) +- [Specify specific device training or inference](docs/en/common_usage/specify_device.md) +- [Single and multi-channel application examples](docs/en/common_usage/single_multi_channel_applications.md) + +
+ +
+Useful Tools + +- [Browse coco json](docs/en/useful_tools/browse_coco_json.md) +- [Browse dataset](docs/en/useful_tools/browse_dataset.md) +- [Print config](docs/en/useful_tools/print_config.md) +- [Dataset analysis](docs/en/useful_tools/dataset_analysis.md) +- [Optimize anchors](docs/en/useful_tools/optimize_anchors.md) +- [Extract subcoco](docs/en/useful_tools/extract_subcoco.md) +- [Visualization scheduler](docs/en/useful_tools/vis_scheduler.md) +- [Dataset converters](docs/en/useful_tools/dataset_converters.md) +- [Download dataset](docs/en/useful_tools/download_dataset.md) +- [Log analysis](docs/en/useful_tools/log_analysis.md) +- [Model converters](docs/en/useful_tools/model_converters.md) + +
+ +
+Basic Tutorials + +- [Learn about configs with YOLOv5](docs/en/tutorials/config.md) +- [Data flow](docs/en/tutorials/data_flow.md) +- [Rotated detection](docs/en/tutorials/rotated_detection.md) +- [Custom Installation](docs/en/tutorials/custom_installation.md) +- [Common Warning Notes](docs/zh_cn/tutorials/warning_notes.md) +- [FAQ](docs/en/tutorials/faq.md) + +
+ +
+Advanced Tutorials + +- [MMYOLO cross-library application](docs/en/advanced_guides/cross-library_application.md) + +
+ +
+Descriptions + +- [Changelog](docs/en/notes/changelog.md) +- [Compatibility](docs/en/notes/compatibility.md) +- [Conventions](docs/en/notes/conventions.md) +- [Code Style](docs/en/notes/code_style.md) + +
+ +## 📊 Overview of Benchmark and Model Zoo [🔝](#-table-of-contents) + +
+ +
+ +Results and models are available in the [model zoo](docs/en/model_zoo.md). + +
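+
+Pre-trained weights are usually fetched together with their config via `mim`. The sketch below uses mim's Python download helper; the exact signature and whether this particular config has published weights are assumptions to verify against the model zoo pages:
+
+```python
+# hedged sketch: download a config and its pre-trained checkpoint with mim's Python API
+from mim import download
+
+# config name taken from the YOLOv5 configs referenced in this repository (assumed to
+# have a published checkpoint; check the model zoo pages if the download fails)
+download('mmyolo', ['yolov5_s-v61_syncbn_8xb16-300e_coco'], dest_root='.')
+```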
+Supported Tasks + +- [x] Object detection +- [x] Rotated object detection + +
+ +
+Supported Algorithms + +- [x] [YOLOv5](configs/yolov5) +- [ ] [YOLOv5u](configs/yolov5/yolov5u) (Inference only) +- [x] [YOLOX](configs/yolox) +- [x] [RTMDet](configs/rtmdet) +- [x] [RTMDet-Rotated](configs/rtmdet) +- [x] [YOLOv6](configs/yolov6) +- [x] [YOLOv7](configs/yolov7) +- [x] [PPYOLOE](configs/ppyoloe) +- [x] [YOLOv8](configs/yolov8) + +
+ +
+Supported Datasets + +- [x] COCO Dataset +- [x] VOC Dataset +- [x] CrowdHuman Dataset +- [x] DOTA 1.0 Dataset + +
+ +
+
+**Module Components**
+
+| Backbones | Necks | Loss | Common |
+| :--------------: | :-------------: | :--------: | :----: |
+| YOLOv5CSPDarknet | YOLOv5PAFPN | IoULoss | |
+| YOLOv8CSPDarknet | YOLOv8PAFPN | mmdet loss | |
+| YOLOXCSPDarknet | YOLOv6RepPAFPN | | |
+| EfficientRep | YOLOXPAFPN | | |
+| CSPNeXt | CSPNeXtPAFPN | | |
+| YOLOv7Backbone | YOLOv7PAFPN | | |
+| PPYOLOECSPResNet | PPYOLOECSPPAFPN | | |
+| mmdet backbone | | | |
+| mmcls backbone | | | |
+| timm | | | |
+
+
+## ❓ FAQ [🔝](#-table-of-contents)
+
+Please refer to the [FAQ](docs/en/tutorials/faq.md) for frequently asked questions.
+
+## 🙌 Contributing [🔝](#-table-of-contents)
+
+We appreciate all contributions to improving MMYOLO. Ongoing projects can be found in our [GitHub Projects](https://github.com/open-mmlab/mmyolo/projects). Community users are welcome to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guidelines.
+
+## 🤝 Acknowledgement [🔝](#-table-of-contents)
+
+MMYOLO is an open source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as the users who give valuable feedback.
+We hope the toolbox and benchmark can serve the growing research community by providing a flexible toolkit for re-implementing existing methods and developing new detectors.
+
+ +
+ +## 🖊️ Citation [🔝](#-table-of-contents) + +If you find this project useful in your research, please consider citing: + +```latex +@misc{mmyolo2022, + title={{MMYOLO: OpenMMLab YOLO} series toolbox and benchmark}, + author={MMYOLO Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmyolo}}, + year={2022} +} +``` + +## 🎫 License [🔝](#-table-of-contents) + +This project is released under the [GPL 3.0 license](LICENSE). + +## 🏗️ Projects in OpenMMLab [🔝](#-table-of-contents) + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models. +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark. +- [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMEval](https://github.com/open-mmlab/mmeval): OpenMMLab machine learning evaluation library. +- [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab. diff --git a/models/YOLO-World/third_party/mmyolo/README_zh-CN.md b/models/YOLO-World/third_party/mmyolo/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..6eb4d95fe5c6d013d677482762d722b20ce826f0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/README_zh-CN.md @@ -0,0 +1,468 @@ +
+
+[OpenMMLab 官网](https://openmmlab.com/) (**HOT**)      [OpenMMLab 开放平台](https://platform.openmmlab.com/) (**TRY IT OUT**)
+
+ +[![PyPI](https://img.shields.io/pypi/v/mmyolo)](https://pypi.org/project/mmyolo) +[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmyolo.readthedocs.io/zh_CN/latest/) +[![deploy](https://github.com/open-mmlab/mmyolo/workflows/deploy/badge.svg)](https://github.com/open-mmlab/mmyolo/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmyolo/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmyolo) +[![license](https://img.shields.io/github/license/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/blob/main/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues) + +[📘使用文档](https://mmyolo.readthedocs.io/zh_CN/latest/) | +[🛠️安装教程](https://mmyolo.readthedocs.io/zh_CN/latest/get_started/installation.html) | +[👀模型库](https://mmyolo.readthedocs.io/zh_CN/latest/model_zoo.html) | +[🆕更新日志](https://mmyolo.readthedocs.io/zh_CN/latest/notes/changelog.html) | +[🤔报告问题](https://github.com/open-mmlab/mmyolo/issues/new/choose) + +
+ +
+ +[English](README.md) | 简体中文 + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +## 📄 Table of Contents + +- [🥳 🚀 最新进展](#--最新进展-) + - [✨ 亮点](#-亮点-) +- [📖 简介](#-简介-) +- [🛠️ 安装](#️%EF%B8%8F-安装-) +- [👨‍🏫 教程](#-教程-) +- [📊 基准测试和模型库](#-基准测试和模型库-) +- [❓ 常见问题](#-常见问题-) +- [🙌 贡献指南](#-贡献指南-) +- [🤝 致谢](#🤝-致谢-) +- [🖊️ 引用](#️-引用-) +- [🎫 开源许可证](#-开源许可证-) +- [🏗️ OpenMMLab 的其他项目](#%EF%B8%8F-openmmlab-的其他项目-) +- [❤️ 欢迎加入 OpenMMLab 社区](#%EF%B8%8F-欢迎加入-openmmlab-社区-) + +## 🥳 🚀 最新进展 [🔝](#-table-of-contents) + +💎 **v0.6.0** 版本已经在 2023.8.15 发布: + +- 支持 YOLOv5 实例分割 +- 基于 MMPose 支持 YOLOX-Pose +- 添加 15 分钟的实例分割教程 +- YOLOv5 支持使用 mask 标注来优化边界框 +- 添加多尺度训练和测试文档 + +我们提供了实用的**脚本命令速查表** + +
+ +
+ +你可以点击[链接](https://pan.baidu.com/s/1QEaqT7YayUdEvh1an0gjHg?pwd=yolo),下载高清版 PDF 文件。 + +同时我们也推出了解读视频: + +| | 内容 | 视频 | 课程中的代码 | +| :-: | :--------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 🌟 | 特征图可视化 | [![Link](https://i2.hdslb.com/bfs/archive/480a0eb41fce26e0acb65f82a74501418eee1032.jpg@112w_63h_1c.webp)](https://www.bilibili.com/video/BV188411s7o8) [![bilibili](https://img.shields.io/badge/dynamic/json?label=views&style=social&logo=bilibili&query=data.stat.view&url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Fweb-interface%2Fview%3Fbvid%3DBV188411s7o8)](https://www.bilibili.com/video/BV188411s7o8) | [特征图可视化.ipynb](https://github.com/open-mmlab/OpenMMLabCourse/blob/main/codes/MMYOLO_tutorials/%5B%E5%B7%A5%E5%85%B7%E7%B1%BB%E7%AC%AC%E4%B8%80%E6%9C%9F%5D%E7%89%B9%E5%BE%81%E5%9B%BE%E5%8F%AF%E8%A7%86%E5%8C%96.ipynb) | +| 🌟 | 源码阅读和调试「必备」技巧 | [![Link](https://i2.hdslb.com/bfs/archive/790d2422c879ff20488910da1c4422b667ea6af7.jpg@112w_63h_1c.webp)](https://www.bilibili.com/video/BV1N14y1V7mB) [![bilibili](https://img.shields.io/badge/dynamic/json?label=views&style=social&logo=bilibili&query=data.stat.view&url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Fweb-interface%2Fview%3Fbvid%3DBV1N14y1V7mB)](https://www.bilibili.com/video/BV1N14y1V7mB) | [源码阅读和调试「必备」技巧文档](https://zhuanlan.zhihu.com/p/580885852) | +| 🌟 | 10分钟换遍主干网络 | [![Link](http://i0.hdslb.com/bfs/archive/c51f1aef7c605856777249a7b4478f44bd69f3bd.jpg@112w_63h_1c.webp)](https://www.bilibili.com/video/BV1JG4y1d7GC) [![bilibili](https://img.shields.io/badge/dynamic/json?label=views&style=social&logo=bilibili&query=data.stat.view&url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Fweb-interface%2Fview%3Fbvid%3DBV1JG4y1d7GC)](https://www.bilibili.com/video/BV1JG4y1d7GC) | [10分钟换遍主干网络文档](https://zhuanlan.zhihu.com/p/585641598)
[10分钟换遍主干网络.ipynb](https://github.com/open-mmlab/OpenMMLabCourse/blob/main/codes/MMYOLO_tutorials/[实用类第二期]10分钟换遍主干网络.ipynb) | +| 🌟 | 自定义数据集从标注到部署保姆级教程 | [![Link](https://i2.hdslb.com/bfs/archive/13f566c89a18c9c881713b63ec14da952d4c0b14.jpg@112w_63h_1c.webp)](https://www.bilibili.com/video/BV1RG4y137i5) [![bilibili](https://img.shields.io/badge/dynamic/json?label=views&style=social&logo=bilibili&query=data.stat.view&url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Fweb-interface%2Fview%3Fbvid%3DBV1RG4y137i5)](https://www.bilibili.com/video/BV1JG4y1d7GC) | [自定义数据集从标注到部署保姆级教程](https://github.com/open-mmlab/mmyolo/blob/dev/docs/zh_cn/user_guides/custom_dataset.md) | +| 🌟 | 顶会第一步 · 模块自定义 | [![Link](http://i2.hdslb.com/bfs/archive/5b23d41ac57466824eaf185ef806ef734414e93b.jpg@112w_63h_1c.webp)](https://www.bilibili.com/video/BV1yd4y1j7VD) [![bilibili](https://img.shields.io/badge/dynamic/json?label=views&style=social&logo=bilibili&query=data.stat.view&url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Fweb-interface%2Fview%3Fbvid%3DBV1yd4y1j7VD)](https://www.bilibili.com/video/BV1yd4y1j7VD) | [顶会第一步·模块自定义.ipynb](https://github.com/open-mmlab/OpenMMLabCourse/blob/main/codes/MMYOLO_tutorials/[实用类第四期]顶会第一步·模块自定义.ipynb) | + +完整视频列表请参考 [中文解读资源汇总 - 视频](https://mmyolo.readthedocs.io/zh_CN/latest/get_started/article.html) + +发布历史和更新细节请参考 [更新日志](https://mmyolo.readthedocs.io/zh_CN/latest/notes/changelog.html) + +### ✨ 亮点 [🔝](#-table-of-contents) + +我们很高兴向大家介绍我们在实时目标识别任务方面的最新成果 RTMDet,包含了一系列的全卷积单阶段检测模型。 RTMDet 不仅在从 tiny 到 extra-large 尺寸的目标检测模型上实现了最佳的参数量和精度的平衡,而且在实时实例分割和旋转目标检测任务上取得了最先进的成果。 更多细节请参阅[技术报告](https://arxiv.org/abs/2212.07784)。 预训练模型可以在[这里](configs/rtmdet)找到。 + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real) + +| Task | Dataset | AP | FPS(TRT FP16 BS1 3090) | +| ------------------------ | ------- | ------------------------------------ | ---------------------- | +| Object Detection | COCO | 52.8 | 322 | +| Instance Segmentation | COCO | 44.6 | 188 | +| Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 | + +
+ +
+
+MMYOLO 中目前实现了目标检测和旋转框目标检测算法,但是相比 MMDetection 版本有显著训练加速,训练速度相比原先版本提升 2.6 倍。
+
+## 📖 简介 [🔝](#-table-of-contents)
+
+MMYOLO 是一个基于 PyTorch 和 MMDetection 的 YOLO 系列算法开源工具箱。它是 [OpenMMLab](https://openmmlab.com/) 项目的一部分。
+
+主分支代码目前支持 PyTorch 1.6 以上的版本。
+
+
+主要特性 + +- 🕹️ **统一便捷的算法评测** + + MMYOLO 统一了各类 YOLO 算法模块的实现, 并提供了统一的评测流程,用户可以公平便捷地进行对比分析。 + +- 📚 **丰富的入门和进阶文档** + + MMYOLO 提供了从入门到部署到进阶和算法解析等一系列文档,方便不同用户快速上手和扩展。 + +- 🧩 **模块化设计** + + MMYOLO 将框架解耦成不同的模块组件,通过组合不同的模块和训练测试策略,用户可以便捷地构建自定义模型。 + +基类-P5 + 图为 RangeKing@GitHub 提供,非常感谢! + +P6 模型图详见 [model_design.md](docs/zh_cn/recommended_topics/model_design.md)。 + +
+ +## 🛠️ 安装 [🔝](#-table-of-contents) + +MMYOLO 依赖 PyTorch, MMCV, MMEngine 和 MMDetection,以下是安装的简要步骤。 更详细的安装指南请参考[安装文档](docs/zh_cn/get_started/installation.md)。 + +```shell +conda create -n mmyolo python=3.8 pytorch==1.10.1 torchvision==0.11.2 cudatoolkit=11.3 -c pytorch -y +conda activate mmyolo +pip install openmim +mim install "mmengine>=0.6.0" +mim install "mmcv>=2.0.0rc4,<2.1.0" +mim install "mmdet>=3.0.0,<4.0.0" +git clone https://github.com/open-mmlab/mmyolo.git +cd mmyolo +# Install albumentations +pip install -r requirements/albu.txt +# Install MMYOLO +mim install -v -e . +``` + +## 👨‍🏫 教程 [🔝](#-table-of-contents) + +MMYOLO 基于 MMDetection 开源库,并且采用相同的代码组织和设计方式。为了更好的使用本开源库,请先阅读 [MMDetection 概述](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html) 对 MMDetection 进行初步地了解。 + +MMYOLO 用法和 MMDetection 几乎一致,所有教程都是通用的,你也可以了解 [MMDetection 用户指南和进阶指南](https://mmdetection.readthedocs.io/zh_CN/3.x/) 。 + +针对和 MMDetection 不同的部分,我们也准备了用户指南和进阶指南,请阅读我们的 [文档](https://mmyolo.readthedocs.io/zh_CN/latest/) 。 + +
+开启 MMYOLO 之旅 + +- [概述](docs/zh_cn/get_started/overview.md) +- [依赖](docs/zh_cn/get_started/dependencies.md) +- [安装和验证](docs/zh_cn/get_started/installation.md) +- [15 分钟上手 MMYOLO 目标检测](docs/zh_cn/get_started/15_minutes_object_detection.md) +- [15 分钟上手 MMYOLO 旋转框目标检测](docs/zh_cn/get_started/15_minutes_rotated_object_detection.md) +- [15 分钟上手 MMYOLO 实例分割](docs/zh_cn/get_started/15_minutes_instance_segmentation.md) +- [中文解读资源汇总](docs/zh_cn/get_started/article.md) + +
+ +
+推荐专题 + +- [如何给 MMYOLO 贡献代码](docs/zh_cn/recommended_topics/contributing.md) +- [训练和测试技巧](docs/zh_cn/recommended_topics/training_testing_tricks.md) +- [MMYOLO 模型结构设计](docs/zh_cn/recommended_topics/model_design.md) +- [原理和实现全解析](docs/zh_cn/recommended_topics/algorithm_descriptions/) +- [轻松更换主干网络](docs/zh_cn/recommended_topics/replace_backbone.md) +- [MMYOLO 模型复杂度分析](docs/zh_cn/recommended_topics/complexity_analysis.md) +- [标注+训练+测试+部署全流程](docs/zh_cn/recommended_topics/labeling_to_deployment_tutorials.md) +- [关于可视化的一切](docs/zh_cn/recommended_topics/visualization.md) +- [模型部署流程](docs/zh_cn/recommended_topics/deploy/) +- [常见错误排查步骤](docs/zh_cn/recommended_topics/troubleshooting_steps.md) +- [MMYOLO 应用范例介绍](docs/zh_cn/recommended_topics/application_examples/) +- [MM 系列 Repo 必备基础](docs/zh_cn/recommended_topics/mm_basics.md) +- [数据集准备和说明](docs/zh_cn/recommended_topics/dataset_preparation.md) + +
+ +
+常用功能 + +- [恢复训练](docs/zh_cn/common_usage/resume_training.md) +- [开启和关闭 SyncBatchNorm](docs/zh_cn/common_usage/syncbn.md) +- [开启混合精度训练](docs/zh_cn/common_usage/amp_training.md) +- [多尺度训练和测试](docs/zh_cn/common_usage/ms_training_testing.md) +- [测试时增强相关说明](docs/zh_cn/common_usage/tta.md) +- [给主干网络增加插件](docs/zh_cn/common_usage/plugins.md) +- [冻结指定网络层权重](docs/zh_cn/common_usage/freeze_layers.md) +- [输出模型预测结果](docs/zh_cn/common_usage/output_predictions.md) +- [设置随机种子](docs/zh_cn/common_usage/set_random_seed.md) +- [算法组合替换教程](docs/zh_cn/common_usage/module_combination.md) +- [使用 mim 跨库调用其他 OpenMMLab 仓库的脚本](docs/zh_cn/common_usage/mim_usage.md) +- [应用多个 Neck](docs/zh_cn/common_usage/multi_necks.md) +- [指定特定设备训练或推理](docs/zh_cn/common_usage/specify_device.md) +- [单通道和多通道应用案例](docs/zh_cn/common_usage/single_multi_channel_applications.md) +- [MM 系列开源库注册表](docs/zh_cn/common_usage/registries_info.md) + +
+ +
+实用工具 + +- [可视化 COCO 标签](docs/zh_cn/useful_tools/browse_coco_json.md) +- [可视化数据集](docs/zh_cn/useful_tools/browse_dataset.md) +- [打印完整配置文件](docs/zh_cn/useful_tools/print_config.md) +- [可视化数据集分析结果](docs/zh_cn/useful_tools/dataset_analysis.md) +- [优化锚框尺寸](docs/zh_cn/useful_tools/optimize_anchors.md) +- [提取 COCO 子集](docs/zh_cn/useful_tools/extract_subcoco.md) +- [可视化优化器参数策略](docs/zh_cn/useful_tools/vis_scheduler.md) +- [数据集转换](docs/zh_cn/useful_tools/dataset_converters.md) +- [数据集下载](docs/zh_cn/useful_tools/download_dataset.md) +- [日志分析](docs/zh_cn/useful_tools/log_analysis.md) +- [模型转换](docs/zh_cn/useful_tools/model_converters.md) + +
+ +
+基础教程 + +- [学习 YOLOv5 配置文件](docs/zh_cn/tutorials/config.md) +- [数据流](docs/zh_cn/tutorials/data_flow.md) +- [旋转目标检测](docs/zh_cn/tutorials/rotated_detection.md) +- [自定义安装](docs/zh_cn/tutorials/custom_installation.md) +- [常见警告说明](docs/zh_cn/tutorials/warning_notes.md) +- [常见问题](docs/zh_cn/tutorials/faq.md) + +
+ +
+进阶教程 + +- [MMYOLO 跨库应用解析](docs/zh_cn/advanced_guides/cross-library_application.md) + +
+ +
+说明 + +- [更新日志](docs/zh_cn/notes/changelog.md) +- [兼容性说明](docs/zh_cn/notes/compatibility.md) +- [默认约定](docs/zh_cn/notes/conventions.md) +- [代码规范](docs/zh_cn/notes/code_style.md) + +
+ +## 📊 基准测试和模型库 [🔝](#-table-of-contents) + +
+ +
+ +测试结果和模型可以在 [模型库](docs/zh_cn/model_zoo.md) 中找到。 + +
+支持的任务 + +- [x] 目标检测 +- [x] 旋转框目标检测 + +
+ +
+支持的算法 + +- [x] [YOLOv5](configs/yolov5) +- [ ] [YOLOv5u](configs/yolov5/yolov5u) (仅推理) +- [x] [YOLOX](configs/yolox) +- [x] [RTMDet](configs/rtmdet) +- [x] [RTMDet-Rotated](configs/rtmdet) +- [x] [YOLOv6](configs/yolov6) +- [x] [YOLOv7](configs/yolov7) +- [x] [PPYOLOE](configs/ppyoloe) +- [x] [YOLOv8](configs/yolov8) + +
+ +
+支持的数据集 + +- [x] COCO Dataset +- [x] VOC Dataset +- [x] CrowdHuman Dataset +- [x] DOTA 1.0 Dataset + +
+ +
+
+**模块组件**
+
+| Backbones | Necks | Loss | Common |
+| :--------------: | :-------------: | :--------: | :----: |
+| YOLOv5CSPDarknet | YOLOv5PAFPN | IoULoss | |
+| YOLOv8CSPDarknet | YOLOv8PAFPN | mmdet loss | |
+| YOLOXCSPDarknet | YOLOv6RepPAFPN | | |
+| EfficientRep | YOLOXPAFPN | | |
+| CSPNeXt | CSPNeXtPAFPN | | |
+| YOLOv7Backbone | YOLOv7PAFPN | | |
+| PPYOLOECSPResNet | PPYOLOECSPPAFPN | | |
+| mmdet backbone | | | |
+| mmcls backbone | | | |
+| timm | | | |
+
+ +## ❓ 常见问题 [🔝](#-table-of-contents) + +请参考 [FAQ](docs/zh_cn/tutorials/faq.md) 了解其他用户的常见问题。 + +## 🙌 贡献指南 [🔝](#-table-of-contents) + +我们感谢所有的贡献者为改进和提升 MMYOLO 所作出的努力。我们将正在进行中的项目添加进了[GitHub Projects](https://github.com/open-mmlab/mmyolo/projects)页面,非常欢迎社区用户能参与进这些项目中来。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。 + +## 🤝 致谢 [🔝](#-table-of-contents) + +MMYOLO 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。 + +
+ +
+ +## 🖊️ 引用 [🔝](#-table-of-contents) + +如果你觉得本项目对你的研究工作有所帮助,请参考如下 bibtex 引用 MMYOLO + +```latex +@misc{mmyolo2022, + title={{MMYOLO: OpenMMLab YOLO} series toolbox and benchmark}, + author={MMYOLO Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmyolo}}, + year={2022} +} +``` + +## 🎫 开源许可证 [🔝](#-table-of-contents) + +该项目采用 [GPL 3.0 开源许可证](LICENSE)。 + +## 🏗️ OpenMMLab 的其他项目 [🔝](#-table-of-contents) + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库 +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 +- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab 深度学习预训练工具箱 +- [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab 新一代人工智能内容生成(AIGC)工具箱 +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO 系列工具箱 +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准 +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱 +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 +- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口 +- [MMEval](https://github.com/open-mmlab/mmeval): OpenMMLab 机器学习算法评测库 +- [Playground](https://github.com/open-mmlab/playground): 收集和展示 OpenMMLab 相关的前沿、有趣的社区项目 + +## ❤️ 欢迎加入 OpenMMLab 社区 [🔝](#-table-of-contents) + +扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) + +
+ +
+ +我们会在 OpenMMLab 社区为大家 + +- 📢 分享 AI 框架的前沿核心技术 +- 💻 解读 PyTorch 常用模块源码 +- 📰 发布 OpenMMLab 的相关新闻 +- 🚀 介绍 OpenMMLab 开发的前沿算法 +- 🏃 获取更高效的问题答疑和意见反馈 +- 🔥 提供与各行各业开发者充分交流的平台 + +干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬 diff --git a/models/YOLO-World/third_party/mmyolo/configs/_base_/default_runtime.py b/models/YOLO-World/third_party/mmyolo/configs/_base_/default_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..098f220573cf481056f2f55f0621198270d51c49 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/_base_/default_runtime.py @@ -0,0 +1,43 @@ +default_scope = 'mmyolo' + +default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=50), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl'), +) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='mmdet.DetLocalVisualizer', + vis_backends=vis_backends, + name='visualizer') +log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) + +log_level = 'INFO' +load_from = None +resume = False + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions +# before MMDet 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +backend_args = None diff --git a/models/YOLO-World/third_party/mmyolo/configs/_base_/det_p5_tta.py b/models/YOLO-World/third_party/mmyolo/configs/_base_/det_p5_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..8df0d5ea8db46fe748cc8fe1074aa928c64b4309 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/_base_/det_p5_tta.py @@ -0,0 +1,58 @@ +# TODO: Need to solve the problem of multiple backend_args parameters +# _backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +_backend_args = None + +tta_model = dict( + type='mmdet.DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300)) + +img_scales = [(640, 640), (320, 320), (960, 960)] + +# LoadImageFromFile +# / | \ +# (RatioResize,LetterResize) (RatioResize,LetterResize) (RatioResize,LetterResize) # noqa +# / \ / \ / \ +# RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip # noqa +# | | | | | | +# LoadAnn LoadAnn LoadAnn LoadAnn LoadAnn LoadAnn +# | | | | | | +# PackDetIn PackDetIn PackDetIn PackDetIn PackDetIn PackDetIn # noqa + +_multiscale_resize_transforms = [ + dict( + type='Compose', + transforms=[ + dict(type='YOLOv5KeepRatioResize', scale=s), + dict( + type='LetterResize', + scale=s, + allow_scale_up=False, + pad_val=dict(img=114)) + ]) for s in img_scales +] + +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_backend_args), + dict( + type='TestTimeAug', + transforms=[ + _multiscale_resize_transforms, + [ + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) 
+ ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'flip', + 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/_base_/pose/coco.py b/models/YOLO-World/third_party/mmyolo/configs/_base_/pose/coco.py new file mode 100644 index 0000000000000000000000000000000000000000..865a95bc02fedd318f32d2e7aa8397147d78fdb5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/_base_/pose/coco.py @@ -0,0 +1,181 @@ +dataset_info = dict( + dataset_name='coco', + paper_info=dict( + author='Lin, Tsung-Yi and Maire, Michael and ' + 'Belongie, Serge and Hays, James and ' + 'Perona, Pietro and Ramanan, Deva and ' + r'Doll{\'a}r, Piotr and Zitnick, C Lawrence', + title='Microsoft coco: Common objects in context', + container='European conference on computer vision', + year='2014', + homepage='http://cocodataset.org/', + ), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + 
dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/base_dynamic.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/base_dynamic.py new file mode 100644 index 0000000000000000000000000000000000000000..747c21fd2bf0523c7d1e2ace67cff3f3d6612c2a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/base_dynamic.py @@ -0,0 +1,17 @@ +_base_ = ['./base_static.py'] +onnx_config = dict( + dynamic_axes={ + 'input': { + 0: 'batch', + 2: 'height', + 3: 'width' + }, + 'dets': { + 0: 'batch', + 1: 'num_dets' + }, + 'labels': { + 0: 'batch', + 1: 'num_dets' + } + }) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/base_static.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/base_static.py new file mode 100644 index 0000000000000000000000000000000000000000..dee01dd5dde1185b5e156b036f72fb3ccb0bf5bc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/base_static.py @@ -0,0 +1,23 @@ +onnx_config = dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + save_file='end2end.onnx', + input_names=['input'], + output_names=['dets', 'labels'], + input_shape=None, + optimize=True) +codebase_config = dict( + type='mmyolo', + task='ObjectDetection', + model_type='end2end', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1), + module=['mmyolo.deploy']) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_dynamic.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_dynamic.py new file mode 100644 index 0000000000000000000000000000000000000000..14f4a12115f403fb4d091db9c07f925ba2ad83ec --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_dynamic.py @@ -0,0 +1,15 @@ +_base_ = ['./base_dynamic.py'] +codebase_config = dict( + type='mmyolo', + task='ObjectDetection', + model_type='end2end', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1), + module=['mmyolo.deploy']) +backend_config = dict(type='onnxruntime') diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_static.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_static.py new file mode 100644 index 0000000000000000000000000000000000000000..3eac8ca75715b711bdf03784dbb977a81bf444d3 --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_onnxruntime_static.py @@ -0,0 +1,15 @@ +_base_ = ['./base_static.py'] +codebase_config = dict( + type='mmyolo', + task='ObjectDetection', + model_type='end2end', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1), + module=['mmyolo.deploy']) +backend_config = dict(type='onnxruntime') diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-fp16_static-320x320.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-fp16_static-320x320.py new file mode 100644 index 0000000000000000000000000000000000000000..b7bd31331ebae8374dc06f9ed4e0e82a3204e36f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-fp16_static-320x320.py @@ -0,0 +1,9 @@ +_base_ = ['./base_static.py'] +onnx_config = dict( + input_shape=[320, 320], output_names=['feat0', 'feat1', 'feat2']) +codebase_config = dict(model_type='rknn') +backend_config = dict( + type='rknn', + common_config=dict(target_platform='rv1126', optimization_level=1), + quantization_config=dict(do_quantization=False, dataset=None), + input_size_list=[[3, 320, 320]]) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-int8_static-320x320.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-int8_static-320x320.py new file mode 100644 index 0000000000000000000000000000000000000000..10c96b2f26d27be28b384612d9ae8ee2cae84983 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_rknn-int8_static-320x320.py @@ -0,0 +1,9 @@ +_base_ = ['./base_static.py'] +onnx_config = dict( + input_shape=[320, 320], output_names=['feat0', 'feat1', 'feat2']) +codebase_config = dict(model_type='rknn') +backend_config = dict( + type='rknn', + common_config=dict(target_platform='rv1126', optimization_level=1), + quantization_config=dict(do_quantization=True, dataset=None), + input_size_list=[[3, 320, 320]]) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-192x192-960x960.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-192x192-960x960.py new file mode 100644 index 0000000000000000000000000000000000000000..da565b6c341add02a74579a734eb4cb123847e6d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-192x192-960x960.py @@ -0,0 +1,13 @@ +_base_ = ['./base_dynamic.py'] +backend_config = dict( + type='tensorrt', + common_config=dict(fp16_mode=True, max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 192, 192], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 960, 960]))) + ]) +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-64x64-1344x1344.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-64x64-1344x1344.py new file mode 100644 index 0000000000000000000000000000000000000000..bad8521afa6ebd4f9bb24a137b66fd1c66668361 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_dynamic-64x64-1344x1344.py @@ -0,0 +1,13 @@ +_base_ = ['./base_dynamic.py'] +backend_config = dict( + type='tensorrt', + common_config=dict(fp16_mode=True, 
max_workspace_size=1 << 32), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 64, 64], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 1344, 1344]))) + ]) +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_static-640x640.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_static-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..24d2a00d9340b2e3cd3392ab2881b68cccd75e8a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-fp16_static-640x640.py @@ -0,0 +1,14 @@ +_base_ = ['./base_static.py'] +onnx_config = dict(input_shape=(640, 640)) +backend_config = dict( + type='tensorrt', + common_config=dict(fp16_mode=True, max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 640, 640], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 640, 640]))) + ]) +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_dynamic-192x192-960x960.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_dynamic-192x192-960x960.py new file mode 100644 index 0000000000000000000000000000000000000000..21591c4d4e72a867392adf9c49cd60c6bb994e35 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_dynamic-192x192-960x960.py @@ -0,0 +1,15 @@ +_base_ = ['./base_dynamic.py'] +backend_config = dict( + type='tensorrt', + common_config=dict( + fp16_mode=True, max_workspace_size=1 << 30, int8_mode=True), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 192, 192], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 960, 960]))) + ]) +calib_config = dict(create_calib=True, calib_file='calib_data.h5') +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_static-640x640.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_static-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..ac394a6b3f854a0d23a1d37ff07d87c523c9784a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt-int8_static-640x640.py @@ -0,0 +1,16 @@ +_base_ = ['./base_static.py'] +onnx_config = dict(input_shape=(640, 640)) +backend_config = dict( + type='tensorrt', + common_config=dict( + fp16_mode=True, max_workspace_size=1 << 30, int8_mode=True), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 640, 640], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 640, 640]))) + ]) +calib_config = dict(create_calib=True, calib_file='calib_data.h5') +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_dynamic-192x192-960x960.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_dynamic-192x192-960x960.py new file mode 100644 index 0000000000000000000000000000000000000000..17047d7380043da537f2f6029bb4373986062c04 --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_dynamic-192x192-960x960.py @@ -0,0 +1,13 @@ +_base_ = ['./base_dynamic.py'] +backend_config = dict( + type='tensorrt', + common_config=dict(fp16_mode=False, max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 192, 192], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 960, 960]))) + ]) +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_static-640x640.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_static-640x640.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec49cc114cc0025310766be17bb5c45af56c516 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/detection_tensorrt_static-640x640.py @@ -0,0 +1,14 @@ +_base_ = ['./base_static.py'] +onnx_config = dict(input_shape=(640, 640)) +backend_config = dict( + type='tensorrt', + common_config=dict(fp16_mode=False, max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 640, 640], + opt_shape=[1, 3, 640, 640], + max_shape=[1, 3, 640, 640]))) + ]) +use_efficientnms = False # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin # noqa E501 diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov5_s-static.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov5_s-static.py new file mode 100644 index 0000000000000000000000000000000000000000..11b7f6a040271f4c82fce8e8240b23ad54fd18c7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov5_s-static.py @@ -0,0 +1,19 @@ +_base_ = '../../yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py' + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=False, + use_mini_pad=False, + ), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov6_s-static.py b/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov6_s-static.py new file mode 100644 index 0000000000000000000000000000000000000000..4f64438ca3d3ba1699e514bc2c8ee900d5095d4d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/deploy/model/yolov6_s-static.py @@ -0,0 +1,19 @@ +_base_ = '../../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=False, + use_mini_pad=False, + ), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +test_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/README.md b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..70a5b2055bbbc79cc6e4817cc3d936780b09f73e --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/README.md @@ -0,0 +1,43 @@ +# PPYOLOE + + + +## Abstract + +PP-YOLOE is an excellent single-stage anchor-free model based on PP-YOLOv2, surpassing a variety of popular YOLO models. PP-YOLOE has a series of models, named s/m/l/x, which are configured through width multiplier and depth multiplier. PP-YOLOE avoids using special operators, such as Deformable Convolution or Matrix NMS, to be deployed friendly on various hardware. + +
+ +
+ +
+ +PPYOLOE-PLUS-l model structure +
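The s/m/l/x variants mentioned in the abstract differ only in their depth and width multipliers. As a quick reference, here is a minimal sketch; the multiplier values are taken from the PPYOLOE+ configs later in this diff, while the helper itself is hypothetical and only illustrates how each variant overrides the shared base config:

```python
# Hypothetical helper for illustration; the factor values match the
# deepen_factor/widen_factor settings in the ppyoloe_plus_* configs below.
PPYOLOE_SCALES = {
    's': dict(deepen_factor=0.33, widen_factor=0.50),
    'm': dict(deepen_factor=0.67, widen_factor=0.75),
    'l': dict(deepen_factor=1.00, widen_factor=1.00),
    'x': dict(deepen_factor=1.33, widen_factor=1.25),
}


def scale_overrides(scale: str) -> dict:
    """Model override dict that a ppyoloe_plus_{scale} config applies on top
    of the shared base config."""
    factors = PPYOLOE_SCALES[scale]
    return dict(
        backbone=dict(**factors),
        neck=dict(**factors),
        bbox_head=dict(head_module=dict(widen_factor=factors['widen_factor'])))
```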
+ +## Results and models + +### PPYOLOE+ COCO + +| Backbone | Arch | Size | Epoch | SyncBN | Mem (GB) | Box AP | Config | Download | +| :---------: | :--: | :--: | :---: | :----: | :------: | :----: | :----------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| PPYOLOE+ -s | P5 | 640 | 80 | Yes | 4.7 | 43.5 | [config](./ppyoloe_plus_s_fast_8xb8-80e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco/ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052-9fee7619.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco/ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052.log.json) | +| PPYOLOE+ -m | P5 | 640 | 80 | Yes | 8.4 | 49.5 | [config](./ppyoloe_plus_m_fast_8xb8-80e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco/ppyoloe_plus_m_fast_8xb8-80e_coco_20230104_193132-e4325ada.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco/ppyoloe_plus_m_fast_8xb8-80e_coco_20230104_193132.log.json) | +| PPYOLOE+ -l | P5 | 640 | 80 | Yes | 13.2 | 52.6 | [config](./ppyoloe_plus_l_fast_8xb8-80e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco/ppyoloe_plus_l_fast_8xb8-80e_coco_20230102_203825-1864e7b3.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco/ppyoloe_plus_l_fast_8xb8-80e_coco_20230102_203825.log.json) | +| PPYOLOE+ -x | P5 | 640 | 80 | Yes | 19.1 | 54.2 | [config](./ppyoloe_plus_x_fast_8xb8-80e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco/ppyoloe_plus_x_fast_8xb8-80e_coco_20230104_194921-8c953949.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco/ppyoloe_plus_x_fast_8xb8-80e_coco_20230104_194921.log.json) | + +**Note**: + +1. The above Box APs are all models with the best performance in COCO +2. The gap between the above performance and the official release is about 0.3. To speed up training in mmyolo, we use pytorch to implement the image resizing in `PPYOLOEBatchRandomResize` for multi-scale training, while official PPYOLOE use opencv. And `lanczos4` is not yet supported in `PPYOLOEBatchRandomResize`. The above two reasons lead to the gap. We will continue to experiment and address the gap in future releases. +3. The mAP of the non-Plus version needs more verification, and we will update more details of the non-Plus version in future versions. 
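Regarding note 2 above, multi-scale training is implemented in the data preprocessor, which resizes whole batches with PyTorch interpolation instead of OpenCV. A minimal sketch of the relevant snippet, excerpted from the `ppyoloe_plus_s_fast_8xb8-80e_coco.py` config further below:

```python
# Excerpt-style sketch; see ppyoloe_plus_s_fast_8xb8-80e_coco.py in this diff
# for the full data_preprocessor definition.
data_preprocessor = dict(
    type='PPYOLOEDetDataPreprocessor',
    pad_size_divisor=32,
    batch_augments=[
        dict(
            type='PPYOLOEBatchRandomResize',  # PyTorch-based batch resize (no lanczos4)
            random_size_range=(320, 800),     # target sizes are sampled from this range
            interval=1,                       # how often (in iterations) the size is re-sampled
            size_divisor=32,
            random_interp=True,               # randomly pick the interpolation mode
            keep_ratio=False)
    ],
    mean=[0., 0., 0.],
    std=[255., 255., 255.],
    bgr_to_rgb=True)
```

Because the resize is applied to already-batched tensors, the dataset pipeline itself does not need a separate multi-scale resize step.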
+ +```latex +@article{Xu2022PPYOLOEAE, + title={PP-YOLOE: An evolved version of YOLO}, + author={Shangliang Xu and Xinxin Wang and Wenyu Lv and Qinyao Chang and Cheng Cui and Kaipeng Deng and Guanzhong Wang and Qingqing Dang and Shengyun Wei and Yuning Du and Baohua Lai}, + journal={ArXiv}, + year={2022}, + volume={abs/2203.16250} +} +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..5b7ed9487b60afecbd9db87f0ad89d9b3be8c93d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/metafile.yml @@ -0,0 +1,69 @@ +Collections: + - Name: PPYOLOE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - Synchronize BN + Training Resources: 8x A100 GPUs + Architecture: + - PPYOLOECSPResNet + - PPYOLOECSPPAFPN + Paper: + URL: https://arxiv.org/abs/2203.16250 + Title: 'PP-YOLOE: An evolved version of YOLO' + README: configs/ppyoloe/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.0.1/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.0.1 + +Models: + - Name: ppyoloe_plus_s_fast_8xb8-80e_coco + In Collection: PPYOLOE + Config: configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py + Metadata: + Training Memory (GB): 4.7 + Epochs: 80 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 43.5 + Weights: https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco/ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052-9fee7619.pth + - Name: ppyoloe_plus_m_fast_8xb8-80e_coco + In Collection: PPYOLOE + Config: configs/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco.py + Metadata: + Training Memory (GB): 8.4 + Epochs: 80 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.5 + Weights: https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco/ppyoloe_plus_m_fast_8xb8-80e_coco_20230104_193132-e4325ada.pth + - Name: ppyoloe_plus_L_fast_8xb8-80e_coco + In Collection: PPYOLOE + Config: configs/ppyoloe/ppyoloe_plus_L_fast_8xb8-80e_coco.py + Metadata: + Training Memory (GB): 13.2 + Epochs: 80 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.6 + Weights: https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco/ppyoloe_plus_l_fast_8xb8-80e_coco_20230102_203825-1864e7b3.pth + - Name: ppyoloe_plus_x_fast_8xb8-80e_coco + In Collection: PPYOLOE + Config: configs/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco.py + Metadata: + Training Memory (GB): 19.1 + Epochs: 80 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 54.2 + Weights: https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco/ppyoloe_plus_x_fast_8xb8-80e_coco_20230104_194921-8c953949.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_l_fast_8xb20-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_l_fast_8xb20-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ef1b4eaae7240e07a5e8450f35b6f71f2271e09f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_l_fast_8xb20-300e_coco.py @@ -0,0 +1,23 @@ +_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. 
+# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_l_imagenet1k_pretrained-c0010e6c.pth' # noqa + +deepen_factor = 1.0 +widen_factor = 1.0 + +train_batch_size_per_gpu = 20 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_m_fast_8xb28-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_m_fast_8xb28-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..abcfd7833016164fbef84a70366b958f28ea6648 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_m_fast_8xb28-300e_coco.py @@ -0,0 +1,23 @@ +_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_m_imagenet1k_pretrained-09f1eba2.pth' # noqa + +deepen_factor = 0.67 +widen_factor = 0.75 + +train_batch_size_per_gpu = 28 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9db53e26f4168e82b6cd760e1b8f41c0bebfae8f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco.py @@ -0,0 +1,16 @@ +_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_l_obj365_pretrained-3dd89562.pth' # noqa + +deepen_factor = 1.0 +widen_factor = 1.0 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..17cb33556f7ff111a4d702e6798abda1aaafeb01 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco.py @@ -0,0 +1,16 @@ +_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. 
+# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_m_ojb365_pretrained-03206892.pth' # noqa + +deepen_factor = 0.67 +widen_factor = 0.75 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_1xb12-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..752ff63388cee00156dc729b68242eae68e4d052 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_1xb12-40e_cat.py @@ -0,0 +1,56 @@ +# Compared to other same scale models, this configuration consumes too much +# GPU memory and is not validated for now +_base_ = 'ppyoloe_plus_s_fast_8xb8-80e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +num_last_epochs = 5 + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 2 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco/ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052-9fee7619.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict( + initial_assigner=dict(num_classes=num_classes), + assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +default_hooks = dict( + param_scheduler=dict( + warmup_min_iter=10, + warmup_epochs=3, + total_epochs=int(max_epochs * 1.2))) + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d98252ccaec23c75b3e8aa3ddb095ee85010bd8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py @@ -0,0 +1,239 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# dataset settings +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' + +# parameters that often need to be modified +img_scale = (640, 640) # width, height +deepen_factor = 0.33 +widen_factor = 0.5 +max_epochs = 80 +num_classes = 80 +save_epoch_intervals = 5 +train_batch_size_per_gpu = 8 +train_num_workers = 8 
+val_batch_size_per_gpu = 1 +val_num_workers = 2 + +# The pretrained model is geted and converted from official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_s_obj365_pretrained-bcfe8478.pth' # noqa + +# persistent_workers must be False if num_workers is 0. +persistent_workers = True + +# Base learning rate for optim_wrapper +base_lr = 0.001 + +strides = [8, 16, 32] + +model = dict( + type='YOLODetector', + data_preprocessor=dict( + # use this to support multi_scale training + type='PPYOLOEDetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='PPYOLOEBatchRandomResize', + random_size_range=(320, 800), + interval=1, + size_divisor=32, + random_interp=True, + keep_ratio=False) + ], + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='PPYOLOECSPResNet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + block_cfg=dict( + type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True), + norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), + act_cfg=dict(type='SiLU', inplace=True), + attention_cfg=dict( + type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')), + use_large_stem=True), + neck=dict( + type='PPYOLOECSPPAFPN', + in_channels=[256, 512, 1024], + out_channels=[192, 384, 768], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csplayer=1, + num_blocks_per_layer=3, + block_cfg=dict( + type='PPYOLOEBasicBlock', shortcut=False, use_alpha=False), + norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), + act_cfg=dict(type='SiLU', inplace=True), + drop_block_cfg=None, + use_spp=True), + bbox_head=dict( + type='PPYOLOEHead', + head_module=dict( + type='PPYOLOEHeadModule', + num_classes=num_classes, + in_channels=[192, 384, 768], + widen_factor=widen_factor, + featmap_strides=strides, + reg_max=16, + norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), + act_cfg=dict(type='SiLU', inplace=True), + num_base_priors=1), + prior_generator=dict( + type='mmdet.MlvlPointGenerator', offset=0.5, strides=strides), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='mmdet.VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + reduction='sum', + loss_weight=1.0), + loss_bbox=dict( + type='IoULoss', + iou_mode='giou', + bbox_format='xyxy', + reduction='mean', + loss_weight=2.5, + return_iou=False), + # Since the dflloss is implemented differently in the official + # and mmdet, we're going to divide loss_weight by 4. 
+ loss_dfl=dict( + type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=0.5 / 4)), + train_cfg=dict( + initial_epoch=30, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=num_classes, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=num_classes, + topk=13, + alpha=1, + beta=6, + eps=1e-9)), + test_cfg=dict( + multi_label=True, + nms_pre=1000, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.7), + max_per_img=300)) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='PPYOLOERandomDistort'), + dict(type='mmdet.Expand', mean=(103.53, 116.28, 123.675)), + dict(type='PPYOLOERandomCrop'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='yolov5_collate', use_ms_training=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotations/instances_train2017.json', + data_prefix=dict(img='train2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=0), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='mmdet.FixShapeResize', + width=img_scale[0], + height=img_scale[1], + keep_ratio=False, + interpolation='bicubic'), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img='val2017/'), + filter_cfg=dict(filter_empty_gt=True, min_size=0), + ann_file='annotations/instances_val2017.json', + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.9, + weight_decay=5e-4, + nesterov=False), + paramwise_cfg=dict(norm_decay_mult=0.)) + +default_hooks = dict( + param_scheduler=dict( + type='PPYOLOEParamSchedulerHook', + warmup_min_iter=1000, + start_factor=0., + warmup_epochs=5, + min_lr_ratio=0.0, + total_epochs=int(max_epochs * 1.2)), + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + save_best='auto', + max_keep_ckpts=3)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + 'annotations/instances_val2017.json', + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git 
a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b8e61120bee63c67da1ae31e492709381b365b47 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco.py @@ -0,0 +1,16 @@ +_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_x_obj365_pretrained-43a8000d.pth' # noqa + +deepen_factor = 1.33 +widen_factor = 1.25 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..622332899cd4f8589559ed3484fb5affb6a7963b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-300e_coco.py @@ -0,0 +1,36 @@ +_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py' + +# The pretrained model is geted and converted from official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_s_imagenet1k_pretrained-2be81763.pth' # noqa + +train_batch_size_per_gpu = 32 +max_epochs = 300 + +# Base learning rate for optim_wrapper +base_lr = 0.01 + +model = dict( + data_preprocessor=dict( + mean=[0.485 * 255, 0.456 * 255, 0.406 * 255], + std=[0.229 * 255., 0.224 * 255., 0.225 * 255.]), + backbone=dict( + block_cfg=dict(use_alpha=False), + init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint=checkpoint, + map_location='cpu')), + train_cfg=dict(initial_epoch=100)) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict(param_scheduler=dict(total_epochs=int(max_epochs * 1.2))) + +train_cfg = dict(max_epochs=max_epochs) + +# PPYOLOE plus use obj365 pretrained model, but PPYOLOE not, +# `load_from` need to set to None. 
+load_from = None diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-400e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-400e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bef9e9130d6194fceeb6471369941050110ace2d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_s_fast_8xb32-400e_coco.py @@ -0,0 +1,9 @@ +_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py' + +max_epochs = 400 + +model = dict(train_cfg=dict(initial_epoch=133)) + +default_hooks = dict(param_scheduler=dict(total_epochs=int(max_epochs * 1.2))) + +train_cfg = dict(max_epochs=max_epochs) diff --git a/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_x_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_x_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fed594f0d08acf2fa64feffa419d0143d1036c55 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/ppyoloe/ppyoloe_x_fast_8xb16-300e_coco.py @@ -0,0 +1,23 @@ +_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py' + +# The pretrained model is obtained and converted from the official PPYOLOE. +# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md +checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_x_imagenet1k_pretrained-81c33ccb.pth' # noqa + +deepen_factor = 1.33 +widen_factor = 1.25 + +train_batch_size_per_gpu = 16 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) diff --git a/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/README.md b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..456021bdd32036a31ca9863194dd74a174fcdd76 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/README.md @@ -0,0 +1,79 @@ +# Projects Based on MMRazor + +There are many research works and pre-trained models built on MMRazor. We list some of them as examples of how to use MMRazor slimmable models in downstream frameworks. As this page may not be complete, please feel free to contribute more efficient MMRazor models to keep it up to date. + +## Description + +This is an implementation of the MMRazor Searchable Backbone application; we provide detection configs and models for MMRazor in MMYOLO. + +### Backbone support + +Here are the Neural Architecture Search (NAS) models from MMRazor that support the YOLO series. If you are looking for MMRazor models for the backbone only, you can refer to the MMRazor [ModelZoo](https://github.com/open-mmlab/mmrazor/blob/dev-1.x/docs/en/get_started/model_zoo.md) and the corresponding repository.
+ +- [x] [AttentiveMobileNetV3](https://github.com/open-mmlab/mmrazor/blob/dev-1.x/configs/_base_/nas_backbones/attentive_mobilenetv3_supernet.py) +- [x] [SearchableShuffleNetV2](https://github.com/open-mmlab/mmrazor/blob/dev-1.x/configs/_base_/nas_backbones/spos_shufflenet_supernet.py) +- [x] [SearchableMobileNetV2](https://github.com/open-mmlab/mmrazor/blob/dev-1.x/configs/_base_/nas_backbones/spos_mobilenet_supernet.py) + +## Usage + +### Prerequisites + +- [MMRazor v1.0.0rc2](https://github.com/open-mmlab/mmrazor/tree/v1.0.0rc2) or higher (dev-1.x) + +Install MMRazor using MIM. + +```shell +mim install mmengine +mim install "mmrazor>=1.0.0rc2" +``` + +Install MMRazor from source + +``` +git clone -b dev-1.x https://github.com/open-mmlab/mmrazor.git +cd mmrazor +# Install MMRazor +mim install -v -e . +``` + +### Training commands + +In MMYOLO's root directory, if you want to use single GPU for training, run the following command to train the model: + +```bash +CUDA_VISIBLE_DEVICES=0 PORT=29500 ./tools/dist_train.sh configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py +``` + +If you want to use several of these GPUs to train in parallel, you can use the following command: + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py +``` + +### Testing commands + +In MMYOLO's root directory, run the following command to test the model: + +```bash +CUDA_VISIBLE_DEVICES=0 PORT=29500 ./tools/dist_test.sh configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py ${CHECKPOINT_PATH} +``` + +## Results and Models + +Here we provide the baseline version of YOLO Series with NAS backbone. + +| Model | size | box AP | Params(M) | FLOPs(G) | Config | Download | +| :------------------------: | :--: | :----: | :----------: | :------: | :---------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| yolov5-s | 640 | 37.7 | 7.235 | 8.265 | [config](../../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700.log.json) | +| yolov5_s_spos_shufflenetv2 | 640 | 38.0 | 7.04(-2.7%) | 7.03 | [config](./yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/yolo_nas_backbone/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco_20230211_220635-578be9a9.pth) \| log | +| yolov6-s | 640 | 44.0 | 18.869 | 24.253 | [config](../../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035.log.json) | +| yolov6_l_attentivenas_a6 | 640 | 45.3 | 18.38(-2.6%) | 8.49 | 
[config](./yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/yolo_nas_backbone/yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco_20230211_222409-dcc72668.pth) \| log | +| RTMDet-tiny | 640 | 41.0 | 4.8 | 8.1 | [config](../../rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117.log.json) | +| rtmdet_tiny_ofa_lat31 | 960 | 41.3 | 3.91(-18.5%) | 6.09 | [config](./rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/yolo_nas_backbone/rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco_20230214_210623-449bb2a0.pth) \| log | + +**Note**: + +1. For fair comparison, the training configuration is consistent with the original configuration and results in an improvement of about 0.2-0.5% AP. +2. `yolov5_s_spos_shufflenetv2` achieves 38.0% AP with only 7.042M parameters, directly instead of the backbone, and outperforms `yolov5_s` with a similar size by more than 0.3% AP. +3. With the efficient backbone of `yolov6_l_attentivenas_a6`, the input channels of `YOLOv6RepPAFPN` are reduced. Meanwhile, modify the **deepen_factor** and the neck is made deeper to restore the AP. +4. with the `rtmdet_tiny_ofa_lat31` backbone with only 3.315M parameters and 3.634G flops, we can modify the input resolution to 960, with a similar model size compared to `rtmdet_tiny` and exceeds `rtmdet_tiny` by 0.4% AP, reducing the size of the whole model to 3.91 MB. diff --git a/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2f9da6685ef0ef920ceb137a165dfb8adcd36254 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py @@ -0,0 +1,124 @@ +_base_ = [ + 'mmrazor::_base_/nas_backbones/ofa_mobilenetv3_supernet.py', + '../../rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py' +] + +checkpoint_file = 'https://download.openmmlab.com/mmrazor/v1/ofa/ofa_mobilenet_subnet_8xb256_in1k_note8_lat%4031ms_top1%4072.8_finetune%4025.py_20221214_0939-981a8b2a.pth' # noqa +fix_subnet = 'https://download.openmmlab.com/mmrazor/v1/yolo_nas_backbone/OFA_SUBNET_NOTE8_LAT31.yaml' # noqa +deepen_factor = 0.167 +widen_factor = 1.0 +channels = [40, 112, 160] +train_batch_size_per_gpu = 16 +img_scale = (960, 960) + +_base_.nas_backbone.out_indices = (2, 4, 5) +_base_.nas_backbone.conv_cfg = dict(type='mmrazor.OFAConv2d') +_base_.nas_backbone.init_cfg = dict( + type='Pretrained', + checkpoint=checkpoint_file, + prefix='architecture.backbone.') +nas_backbone = dict( + type='mmrazor.sub_model', + fix_subnet=fix_subnet, + cfg=_base_.nas_backbone, + extra_prefix='backbone.') + +_base_.model.backbone = nas_backbone +_base_.model.neck.widen_factor = widen_factor +_base_.model.neck.deepen_factor = deepen_factor +_base_.model.neck.in_channels = channels +_base_.model.neck.out_channels = channels[0] +_base_.model.bbox_head.head_module.in_channels = channels[0] +_base_.model.bbox_head.head_module.feat_channels = channels[0] 
+_base_.model.bbox_head.head_module.widen_factor = widen_factor + +_base_.model.test_cfg = dict( + multi_label=True, + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=20, + random_pop=False, + pad_val=114.0), + dict( + type='mmdet.RandomResize', + scale=(1280, 1280), + ratio_range=(0.5, 2.0), # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOXMixUp', + img_scale=(960, 960), + ratio_range=(1.0, 1.0), + max_cached_images=10, + use_cached=True, + random_pop=False, + pad_val=(114, 114, 114), + prob=0.5), + dict(type='mmdet.PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='mmdet.RandomResize', + scale=img_scale, + ratio_range=(0.5, 2.0), # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, dataset=dict(pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=(960, 960), keep_ratio=True), + dict(type='mmdet.Pad', size=(960, 960), pad_val=dict(img=(114, 114, 114))), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None)) + +test_dataloader = val_dataloader + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=train_pipeline_stage2) +] + +find_unused_parameters = True diff --git a/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..beb4941cfa482ec52e83abc67df70d9734fa3d3a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py @@ -0,0 +1,29 @@ +_base_ = [ + 'mmrazor::_base_/nas_backbones/spos_shufflenet_supernet.py', + '../../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' +] + +checkpoint_file = 'https://download.openmmlab.com/mmrazor/v1/spos/spos_shufflenetv2_subnet_8xb128_in1k_flops_0.33M_acc_73.87_20211222-1f0a0b4d_v3.pth' # noqa +fix_subnet = 'https://download.openmmlab.com/mmrazor/v1/spos/spos_shufflenetv2_subnet_8xb128_in1k_flops_0.33M_acc_73.87_20211222-1f0a0b4d_subnet_cfg_v3.yaml' # 
noqa +widen_factor = 1.0 +channels = [160, 320, 640] + +_base_.nas_backbone.out_indices = (1, 2, 3) +_base_.nas_backbone.init_cfg = dict( + type='Pretrained', + checkpoint=checkpoint_file, + prefix='architecture.backbone.') +nas_backbone = dict( + type='mmrazor.sub_model', + fix_subnet=fix_subnet, + cfg=_base_.nas_backbone, + extra_prefix='architecture.backbone.') + +_base_.model.backbone = nas_backbone +_base_.model.neck.widen_factor = widen_factor +_base_.model.neck.in_channels = channels +_base_.model.neck.out_channels = channels +_base_.model.bbox_head.head_module.in_channels = channels +_base_.model.bbox_head.head_module.widen_factor = widen_factor + +find_unused_parameters = True diff --git a/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab64a6460b3fbb29cc1a47a1bd1a2456bb11ac3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/razor/subnets/yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,35 @@ +_base_ = [ + 'mmrazor::_base_/nas_backbones/attentive_mobilenetv3_supernet.py', + '../../yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py' +] + +checkpoint_file = 'https://download.openmmlab.com/mmrazor/v1/bignas/attentive_mobilenet_subnet_8xb256_in1k_flops-0.93G_acc-80.81_20221229_200440-73d92cc6.pth' # noqa +fix_subnet = 'https://download.openmmlab.com/mmrazor/v1/bignas/ATTENTIVE_SUBNET_A6.yaml' # noqa +deepen_factor = 1.2 +widen_factor = 1 +channels = [40, 128, 224] +mid_channels = [40, 128, 224] + +_base_.train_dataloader.batch_size = 16 +_base_.nas_backbone.out_indices = (2, 4, 6) +_base_.nas_backbone.conv_cfg = dict(type='mmrazor.BigNasConv2d') +_base_.nas_backbone.norm_cfg = dict(type='mmrazor.DynamicBatchNorm2d') +_base_.nas_backbone.init_cfg = dict( + type='Pretrained', + checkpoint=checkpoint_file, + prefix='architecture.backbone.') +nas_backbone = dict( + type='mmrazor.sub_model', + fix_subnet=fix_subnet, + cfg=_base_.nas_backbone, + extra_prefix='backbone.') + +_base_.model.backbone = nas_backbone +_base_.model.neck.widen_factor = widen_factor +_base_.model.neck.deepen_factor = deepen_factor +_base_.model.neck.in_channels = channels +_base_.model.neck.out_channels = mid_channels +_base_.model.bbox_head.head_module.in_channels = mid_channels +_base_.model.bbox_head.head_module.widen_factor = widen_factor + +find_unused_parameters = True diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/README.md b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..94e86546a34c3d70da4b51d81ff46e8ee7d5f242 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/README.md @@ -0,0 +1,83 @@ +# RTMDet: An Empirical Study of Designing Real-Time Object Detectors + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real) 
+[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real) + + + +## Abstract + +In this paper, we aim to design an efficient real-time object detector that exceeds the YOLO series and is easily extensible for many object recognition tasks such as instance segmentation and rotated object detection. To obtain a more efficient model architecture, we explore an architecture that has compatible capacities in the backbone and neck, constructed by a basic building block that consists of large-kernel depth-wise convolutions. We further introduce soft labels when calculating matching costs in the dynamic label assignment to improve accuracy. Together with better training techniques, the resulting object detector, named RTMDet, achieves 52.8% AP on COCO with 300+ FPS on an NVIDIA 3090 GPU, outperforming the current mainstream industrial detectors. RTMDet achieves the best parameter-accuracy trade-off with tiny/small/medium/large/extra-large model sizes for various application scenarios, and obtains new state-of-the-art performance on real-time instance segmentation and rotated object detection. We hope the experimental results can provide new insights into designing versatile real-time object detectors for many object recognition tasks. + +
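To make the building block described in the abstract concrete, here is an illustrative PyTorch sketch of a residual block built around a large-kernel depth-wise convolution. It is a simplified stand-in rather than the exact CSPNeXt block used by RTMDet, and the 5x5 kernel size is an assumption for illustration:

```python
import torch
import torch.nn as nn


class LargeKernelDWBlock(nn.Module):
    """Simplified illustration of a large-kernel depth-wise building block.

    Not the exact RTMDet/CSPNeXt block; it only sketches the idea from the
    abstract: point-wise mixing, a large-kernel depth-wise convolution and a
    residual connection.
    """

    def __init__(self, channels: int, kernel_size: int = 5):
        super().__init__()
        self.pw = nn.Conv2d(channels, channels, kernel_size=1, bias=False)
        self.dw = nn.Conv2d(
            channels, channels, kernel_size,
            padding=kernel_size // 2, groups=channels, bias=False)
        self.bn = nn.BatchNorm2d(channels)
        self.act = nn.SiLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.act(self.bn(self.dw(self.pw(x))))


if __name__ == '__main__':
    block = LargeKernelDWBlock(64)
    print(block(torch.randn(1, 64, 80, 80)).shape)  # torch.Size([1, 64, 80, 80])
```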
+ +
+ +
+ +RTMDet-l model structure +
+ +## Results and Models + +### Object Detection + +| Model | size | Params(M) | FLOPs(G) | TRT-FP16-Latency(ms) | box AP | TTA box AP | Config | Download | +| :------------: | :--: | :-------: | :------: | :------------------: | :---------: | :---------: | :---------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-tiny | 640 | 4.8 | 8.1 | 0.98 | 41.0 | 42.7 | [config](./rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117.log.json) | +| RTMDet-tiny \* | 640 | 4.8 | 8.1 | 0.98 | 41.8 (+0.8) | 43.2 (+0.5) | [config](./distillation/kd_tiny_rtmdet_s_neck_300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_tiny_rtmdet_s_neck_300e_coco/kd_tiny_rtmdet_s_neck_300e_coco_20230213_104240-e1e4197c.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_tiny_rtmdet_s_neck_300e_coco/kd_tiny_rtmdet_s_neck_300e_coco_20230213_104240-176901d8.json) | +| RTMDet-s | 640 | 8.89 | 14.8 | 1.22 | 44.6 | 45.8 | [config](./rtmdet_s_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329.log.json) | +| RTMDet-s \* | 640 | 8.89 | 14.8 | 1.22 | 45.7 (+1.1) | 47.3 (+1.5) | [config](./distillation/kd_s_rtmdet_m_neck_300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_s_rtmdet_m_neck_300e_coco/kd_s_rtmdet_m_neck_300e_coco_20230220_140647-446ff003.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_s_rtmdet_m_neck_300e_coco/kd_s_rtmdet_m_neck_300e_coco_20230220_140647-89862269.json) | +| RTMDet-m | 640 | 24.71 | 39.27 | 1.62 | 49.3 | 50.9 | [config](./rtmdet_m_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952-40af4fe8.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952.log.json) | +| RTMDet-m \* | 640 | 24.71 | 39.27 | 1.62 | 50.2 (+0.9) | 51.9 (+1.0) | [config](./distillation/kd_m_rtmdet_l_neck_300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_m_rtmdet_l_neck_300e_coco/kd_m_rtmdet_l_neck_300e_coco_20230220_141313-b806f503.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_m_rtmdet_l_neck_300e_coco/kd_m_rtmdet_l_neck_300e_coco_20230220_141313-bd028fd3.json) | +| RTMDet-l | 640 | 52.3 | 80.23 | 2.44 | 51.4 | 53.1 | [config](./rtmdet_l_syncbn_fast_8xb32-300e_coco.py) | 
[model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928.log.json) | +| RTMDet-l \* | 640 | 52.3 | 80.23 | 2.44 | 52.3 (+0.9) | 53.7 (+0.6) | [config](./distillation/kd_l_rtmdet_x_neck_300e_coco.py) | [model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_l_rtmdet_x_neck_300e_coco/kd_l_rtmdet_x_neck_300e_coco_20230220_141912-c9979722.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_l_rtmdet_x_neck_300e_coco/kd_l_rtmdet_x_neck_300e_coco_20230220_141912-c5c4e17b.json) | +| RTMDet-x | 640 | 94.86 | 141.67 | 3.10 | 52.8 | 54.2 | [config](./rtmdet_x_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345.log.json) | + +**Note**: + +1. The inference speed of RTMDet is measured on an NVIDIA 3090 GPU with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1, and without NMS. +2. For a fair comparison, the config of bbox postprocessing is changed to be consistent with YOLOv5/6/7 after [PR#9494](https://github.com/open-mmlab/mmdetection/pull/9494), bringing about 0.1~0.3% AP improvement. +3. `TTA` means that Test Time Augmentation. It's perform 3 multi-scaling transformations on the image, followed by 2 flipping transformations (flipping and not flipping). You only need to specify `--tta` when testing to enable. see [TTA](https://github.com/open-mmlab/mmyolo/blob/dev/docs/en/common_usage/tta.md) for details. +4. \* means checkpoints are trained with knowledge distillation. More details can be found in [RTMDet distillation](./distillation). + +### Rotated Object Detection + +RTMDet-R achieves state-of-the-art on various remote sensing datasets. 
+ +| Backbone | pretrain | Epoch | Batch Size | Aug | mmAP | mAP50 | mAP75 | Mem (GB) | Params(M) | FLOPS(G) | TRT-FP16-Latency(ms) | Config | Download | +| :---------: | :------: | :---: | :--------: | :-------------: | :---: | :---: | :---: | :------: | :-------: | :------: | :------------------: | :--------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| RTMDet-tiny | IN | 36 | 1xb8 | RR | 46.94 | 75.07 | 50.11 | 12.7 | 4.88 | 20.45 | 4.40 | [config](./rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota/rtmdet-r_tiny_fast_1xb8-36e_dota_20230228_162210-e8ccfb1c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota/rtmdet-r_tiny_fast_1xb8-36e_dota_20230228_162210.log.json) | +| RTMDet-s | IN | 36 | 1xb8 | RR | 48.99 | 77.33 | 52.65 | 16.6 | 8.86 | 37.62 | 4.86 | [config](./rotated/rtmdet-r_s_fast_1xb8-36e_dota.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota/rtmdet-r_s_fast_1xb8-36e_dota_20230224_110307-3946a5aa.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota/rtmdet-r_s_fast_1xb8-36e_dota_20230224_110307.log.json) | +| RTMDet-m | IN | 36 | 2xb4 | RR | 50.38 | 78.43 | 54.28 | 10.9 | 24.67 | 99.76 | 7.82 | [config](./rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota/rtmdet-r_m_syncbn_fast_2xb4-36e_dota_20230224_124237-29ae1619.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota/rtmdet-r_m_syncbn_fast_2xb4-36e_dota_20230224_124237.log.json) | +| RTMDet-l | IN | 36 | 2xb4 | RR | 50.61 | 78.66 | 54.95 | 16.1 | 52.27 | 204.21 | 10.82 | [config](./rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py) | [model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota/rtmdet-r_l_syncbn_fast_2xb4-36e_dota_20230224_124544-38bc5f08.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota/rtmdet-r_l_syncbn_fast_2xb4-36e_dota_20230224_124544.log.json) | +| RTMDet-tiny | IN | 36 | 1xb8 | MS+RR | - | - | - | | 4.88 | 20.45 | 4.40 | [config](./rotated/rtmdet-r_tiny_fast_1xb8-36e_dota-ms.py) | \| | +| RTMDet-s | IN | 36 | 1xb8 | MS+RR | - | - | - | | 8.86 | 37.62 | 4.86 | [config](./rotated/rtmdet-r_s_fast_1xb8-36e_dota-ms.py) | \| | +| RTMDet-m | IN | 36 | 2xb4 | MS+RR | - | - | - | | 24.67 | 99.76 | 7.82 | [config](./rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota-ms.py) | \| | +| RTMDet-l | IN | 36 | 2xb4 | MS+RR | - | - | - | | 52.27 | 204.21 | 10.82 | [config](./rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py) | \| | +| RTMDet-l | COCO | 36 | 2xb4 | MS+RR | - | - | - | | 52.27 | 204.21 | 10.82 | [config](./rotated/rtmdet-r_l_syncbn_fast_coco-pretrain_2xb4-36e_dota-ms.py) | \| | +| RTMDet-l | IN | 100 | 2xb4 | Mixup+Mosaic+RR | 55.05 | 80.14 | 61.32 | 19.6 | 52.27 | 204.21 | 10.82 | [config](./rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py) | 
[model](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota_20230224_124735-ed4ea966.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota_20230224_124735.log.json) | + +**Note**: + +1. Please follow the [Rotated Object Detection](../../docs/zh_cn/tutorials/rotated_detection.md) doc to get started with rotated detection. +2. We follow the latest metrics from the DOTA evaluation server; the original VOC-format mAP is now mAP50. +3. All models are trained with an image size of 1024\*1024. +4. `IN` means ImageNet pretrain, `COCO` means COCO pretrain. +5. For Aug, RR means `RandomRotate` and MS means multi-scale augmentation during data preparation. +6. The inference speed here is measured on an NVIDIA 2080Ti GPU with TensorRT 8.4.3, cuDNN 8.2.0, FP16, batch size=1, and with NMS. +7. Currently, the training process of RTMDet-R tiny is unstable and may fluctuate by about 1% in accuracy; we will continue to investigate why. + +## Citation + +```latex +@misc{lyu2022rtmdet, + title={RTMDet: An Empirical Study of Designing Real-Time Object Detectors}, + author={Chengqi Lyu and Wenwei Zhang and Haian Huang and Yue Zhou and Yudong Wang and Yanyi Liu and Shilong Zhang and Kai Chen}, + year={2022}, + eprint={2212.07784}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/README.md b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2db5a50ec5ed0d3b499ca7d3c83bc4963c95af3f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/README.md @@ -0,0 +1,53 @@ +# CSPNeXt ImageNet Pre-training + +In this folder, we provide the ImageNet pre-training configs of RTMDet's backbone, CSPNeXt. + +## Requirements + +To train with these configs, please install [MMClassification 1.x](https://github.com/open-mmlab/mmclassification/tree/1.x) first. + +Install with MIM: + +```shell +mim install mmcls>=1.0.0rc0 +``` + +or install with pip: + +```shell +pip install mmcls>=1.0.0rc0 +``` + +## Prepare Dataset + +To pre-train on ImageNet, you need to prepare the dataset first. Please refer to the [guide](https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#imagenet). + +## How to Train + +You can use the classification configs in the same way as the detection configs. + +For single-GPU training, run: + +```shell +python tools/train.py \ + ${CONFIG_FILE} \ + [optional arguments] +``` + +For multi-GPU training, run: + +```shell +bash ./tools/dist_train.sh \ + ${CONFIG_FILE} \ + ${GPU_NUM} \ + [optional arguments] +``` + +More details can be found in the [user guides](https://mmdetection.readthedocs.io/en/3.x/user_guides/train.html).
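As a quick sanity check before launching a run, the pre-training configs in this folder can also be loaded and inspected programmatically. A minimal sketch, assuming `mmengine` and `mmcls` are installed as described above and that the path is given relative to the mmyolo repository root:

```python
from mmengine.config import Config

# Hypothetical inspection snippet; adjust the path to your checkout.
cfg = Config.fromfile(
    'configs/rtmdet/cspnext_imagenet_pretrain/'
    'cspnext-tiny_8xb256-rsb-a1-600e_in1k.py')

# The tiny variant only overrides the backbone multipliers and the head input
# channels of the shared cspnext-s base config.
print(cfg.model.backbone.deepen_factor,  # 0.167
      cfg.model.backbone.widen_factor,   # 0.375
      cfg.model.head.in_channels)        # 384
```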
+ +## Results and Models + +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +| :----------: | :--------: | :-------: | :------: | :-------: | :-------: | :-----------------------------------------------------------------------------------------------------------------: | +| CSPNeXt-tiny | 224x224 | 2.73 | 0.339 | 69.44 | 89.45 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth) | +| CSPNeXt-s | 224x224 | 4.89 | 0.664 | 74.41 | 92.23 | [model](https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth) | diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..4281f9cd7d260f22d7b0e8d18d2c4f56866ad840 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,67 @@ +_base_ = [ + 'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py', + 'mmcls::_base_/schedules/imagenet_bs2048_rsb.py', + 'mmcls::_base_/default_runtime.py' +] + +custom_imports = dict( + imports=['mmdet.models', 'mmyolo.models'], allow_failed_imports=False) + +model = dict( + type='ImageClassifier', + backbone=dict( + type='mmyolo.CSPNeXt', + arch='P5', + out_indices=(4, ), + expand_ratio=0.5, + deepen_factor=0.33, + widen_factor=0.5, + channel_attention=True, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='mmyolo.SiLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + loss_weight=1.0), + topk=(1, 5)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.2, num_classes=1000), + dict(type='CutMix', alpha=1.0, num_classes=1000) + ])) + +# dataset settings +train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(weight_decay=0.01), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=595, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=600) +] + +train_cfg = dict(by_epoch=True, max_epochs=600) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000000000000000000000000000000000000..af3170bdc51778c4601d4426aa88cc27c608f100 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,5 @@ +_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py' + +model = dict( + backbone=dict(deepen_factor=0.167, widen_factor=0.375), + head=dict(in_channels=384)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/README.md b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/README.md new file mode 100644 
index 0000000000000000000000000000000000000000..452a46cb9904a1782c0fee9cd7d469c0749caadb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/README.md @@ -0,0 +1,146 @@ +# Distill RTM Detectors Based on MMRazor + +## Description + +To further improve the model accuracy while not introducing much additional +computation cost, we apply feature-based distillation to the training phase +of these RTM detectors. In summary, our distillation strategy is threefold: + +(1) Inspired by [PKD](https://arxiv.org/abs/2207.02039), we first normalize +the intermediate feature maps to have zero mean and unit variance before calculating +the distillation loss. + +(2) Inspired by [CWD](https://arxiv.org/abs/2011.13256), we adopt the channel-wise +distillation paradigm, which can pay more attention to the most salient regions +of each channel. + +(3) Inspired by [DAMO-YOLO](https://arxiv.org/abs/2211.15444), the distillation +process is split into two stages. 1) The teacher distills the student during the +first stage (280 epochs) on the strong mosaic domain. 2) The student fine-tunes itself +on the no-mosaic domain during the second stage (20 epochs). + +## Results and Models + +| Location | Dataset | Teacher | Student | mAP | mAP(T) | mAP(S) | Config | Download | +| :------: | :-----: | :-------: | :-------: | :---------: | :----: | :----: | :------------------------------------------: | :-------- | +| FPN | COCO | [RTMDet-s](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py) | [RTMDet-tiny](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py) | 41.8 (+0.8) | 44.6 | 41.0 | [config](kd_tiny_rtmdet_s_neck_300e_coco.py) | [teacher](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth) \|[model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_tiny_rtmdet_s_neck_300e_coco/kd_tiny_rtmdet_s_neck_300e_coco_20230213_104240-e1e4197c.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_tiny_rtmdet_s_neck_300e_coco/kd_tiny_rtmdet_s_neck_300e_coco_20230213_104240-176901d8.json) | +| FPN | COCO | [RTMDet-m](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py) | [RTMDet-s](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py) | 45.7 (+1.1) | 49.3 | 44.6 | [config](kd_s_rtmdet_m_neck_300e_coco.py) | [teacher](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952-40af4fe8.pth)
\|[model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_s_rtmdet_m_neck_300e_coco/kd_s_rtmdet_m_neck_300e_coco_20230220_140647-446ff003.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_s_rtmdet_m_neck_300e_coco/kd_s_rtmdet_m_neck_300e_coco_20230220_140647-89862269.json) | +| FPN | COCO | [RTMDet-l](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py) | [RTMDet-m](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py) | 50.2 (+0.9) | 51.4 | 49.3 | [config](kd_m_rtmdet_l_neck_300e_coco.py) | [teacher](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth) \|[model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_m_rtmdet_l_neck_300e_coco/kd_m_rtmdet_l_neck_300e_coco_20230220_141313-b806f503.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_m_rtmdet_l_neck_300e_coco/kd_m_rtmdet_l_neck_300e_coco_20230220_141313-bd028fd3.json) | +| FPN | COCO | [RTMDet-x](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py) | [RTMDet-l](https://github.com/open-mmlab/mmyolo/blob/main/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py) | 52.3 (+0.9) | 52.8 | 51.4 | [config](kd_l_rtmdet_x_neck_300e_coco.py) | [teacher](https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth) \|[model](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_l_rtmdet_x_neck_300e_coco/kd_l_rtmdet_x_neck_300e_coco_20230220_141912-c9979722.pth) \| [log](https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_l_rtmdet_x_neck_300e_coco/kd_l_rtmdet_x_neck_300e_coco_20230220_141912-c5c4e17b.json) | + +## Usage + +### Prerequisites + +- [MMRazor dev-1.x](https://github.com/open-mmlab/mmrazor/tree/dev-1.x) + +Install MMRazor from source: + +``` +git clone -b dev-1.x https://github.com/open-mmlab/mmrazor.git +cd mmrazor +# Install MMRazor +mim install -v -e . +``` + +### Training commands + +In MMYOLO's root directory, run the following command to train RTMDet-tiny +with 8 GPUs (the GPU count is passed as the second argument of `dist_train.sh`), using RTMDet-s as the teacher: + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29500 ./tools/dist_train.sh configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py 8 +``` + +### Testing commands + +In MMYOLO's root directory, run the following command to test the model: + +```bash +CUDA_VISIBLE_DEVICES=0 PORT=29500 ./tools/dist_test.sh configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py ${CHECKPOINT_PATH} +``` + +### Getting student-only checkpoint + +After training, the checkpoint contains parameters for both the student and teacher models. +Run the following command to convert it to a student-only checkpoint: + +```bash +python ./tools/model_converters/convert_kd_ckpt_to_student.py ${CHECKPOINT_PATH} --out-path ${OUTPUT_CHECKPOINT_PATH} +``` + +## Configs + +Here we provide detection configs and models for MMRazor in MMYOLO. For clarity, +we take `./kd_tiny_rtmdet_s_neck_300e_coco.py` as an example to show how to +distill an RTM detector based on MMRazor. + +Here is the main part of `./kd_tiny_rtmdet_s_neck_300e_coco.py`.
+ +```python +norm_cfg = dict(type='BN', affine=False, track_running_stats=False) + +distiller=dict( + type='ConfigurableDistiller', + student_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), + ), + teacher_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), + connectors=dict( + fpn0_s=dict(type='ConvModuleConnector', in_channel=96, + out_channel=128, bias=False, norm_cfg=norm_cfg, + act_cfg=None), + fpn0_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg), + fpn1_s=dict( + type='ConvModuleConnector', in_channel=96, + out_channel=128, bias=False, norm_cfg=norm_cfg, + act_cfg=None), + fpn1_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg), + fpn2_s=dict( + type='ConvModuleConnector', in_channel=96, + out_channel=128, bias=False, norm_cfg=norm_cfg, + act_cfg=None), + fpn2_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg)), + distill_losses=dict( + loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), + loss_forward_mappings=dict( + loss_fpn0=dict( + preds_S=dict(from_student=True, recorder='fpn0', connector='fpn0_s'), + preds_T=dict(from_student=False, recorder='fpn0', connector='fpn0_t')), + loss_fpn1=dict( + preds_S=dict(from_student=True, recorder='fpn1', connector='fpn1_s'), + preds_T=dict(from_student=False, recorder='fpn1', connector='fpn1_t')), + loss_fpn2=dict( + preds_S=dict(from_student=True, recorder='fpn2', connector='fpn2_s'), + preds_T=dict(from_student=False, recorder='fpn2', connector='fpn2_t')))) + +``` + +`recorders` are used to record various intermediate results during the model forward. +In this example, they record the outputs of three `nn.Module`s of the teacher +and the student. Details are listed in [Recorder](https://github.com/open-mmlab/mmrazor/blob/dev-1.x/docs/en/advanced_guides/recorder.md) and [MMRazor Distillation](https://zhuanlan.zhihu.com/p/596582609) (in Chinese). + +`connectors` are adaptive layers which usually map the teacher's and the student's features +to the same dimension. + +`distill_losses` are configs for multiple distill losses; here each one is a `ChannelWiseDivergence` loss (sketched below). + +`loss_forward_mappings` are mappings between distill loss forward arguments and records.
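
For intuition, here is a minimal, self-contained sketch (plain PyTorch, not MMRazor's actual implementation) of what a channel-wise divergence loss computes for one pair of recorded `(N, C, H, W)` feature maps once the connectors have brought them to the same shape:

```python
import torch
import torch.nn.functional as F


def channel_wise_divergence(feat_s: torch.Tensor,
                            feat_t: torch.Tensor,
                            tau: float = 1.0) -> torch.Tensor:
    """Sketch of a channel-wise divergence loss (cf. CWD, arXiv:2011.13256).

    Each channel of the (N, C, H, W) feature maps is turned into a spatial
    probability distribution via a softmax over H*W, and the student is
    pushed to match the teacher's per-channel distribution with a KL term.
    """
    n, c, h, w = feat_s.shape
    p_t = F.softmax(feat_t.reshape(n * c, h * w) / tau, dim=1)
    log_p_s = F.log_softmax(feat_s.reshape(n * c, h * w) / tau, dim=1)
    # KL(teacher || student), averaged over the N * C channel distributions
    return F.kl_div(log_p_s, p_t, reduction='batchmean') * (tau ** 2)


# Toy example standing in for one recorded FPN level (e.g. fpn0):
student_fpn0 = torch.randn(2, 128, 80, 80)  # student output after its connector
teacher_fpn0 = torch.randn(2, 128, 80, 80)  # teacher output after its connector
loss_fpn0 = channel_wise_divergence(student_fpn0, teacher_fpn0)
```

This is also why the connectors above use a `norm_cfg` with `affine=False`: following PKD, both feature maps are normalized to zero mean and unit variance before the divergence is measured.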
+In addition, the student fine-tunes itself on the no-mosaic domain for the last 20 epochs, +so we add a new hook named `StopDistillHook` to stop distillation on time. +We need to add this hook to the `custom_hooks` list like this: + +```python +custom_hooks = [..., dict(type='mmrazor.StopDistillHook', stop_epoch=280)] +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_l_rtmdet_x_neck_300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_l_rtmdet_x_neck_300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2bab26a0d20342c38d7d1ec0a8221fdc426f016b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_l_rtmdet_x_neck_300e_coco.py @@ -0,0 +1,99 @@ +_base_ = '../rtmdet_l_syncbn_fast_8xb32-300e_coco.py' + +teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth' # noqa: E501 + +norm_cfg = dict(type='BN', affine=False, track_running_stats=False) + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='FpnTeacherDistill', + architecture=dict( + cfg_path='mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py'), + teacher=dict( + cfg_path='mmyolo::rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py'), + teacher_ckpt=teacher_ckpt, + distiller=dict( + type='ConfigurableDistiller', + # `recorders` are used to record various intermediate results during + # the model forward. + student_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), + ), + teacher_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), + # `connectors` are adaptive layers which usually map teacher's and + # students features to the same dimension. + connectors=dict( + fpn0_s=dict( + type='ConvModuleConnector', + in_channel=256, + out_channel=320, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn0_t=dict( + type='NormConnector', in_channels=320, norm_cfg=norm_cfg), + fpn1_s=dict( + type='ConvModuleConnector', + in_channel=256, + out_channel=320, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn1_t=dict( + type='NormConnector', in_channels=320, norm_cfg=norm_cfg), + fpn2_s=dict( + type='ConvModuleConnector', + in_channel=256, + out_channel=320, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn2_t=dict( + type='NormConnector', in_channels=320, norm_cfg=norm_cfg)), + distill_losses=dict( + loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), + # `loss_forward_mappings` are mappings between distill loss forward + # arguments and records.
+ loss_forward_mappings=dict( + loss_fpn0=dict( + preds_S=dict( + from_student=True, recorder='fpn0', connector='fpn0_s'), + preds_T=dict( + from_student=False, recorder='fpn0', connector='fpn0_t')), + loss_fpn1=dict( + preds_S=dict( + from_student=True, recorder='fpn1', connector='fpn1_s'), + preds_T=dict( + from_student=False, recorder='fpn1', connector='fpn1_t')), + loss_fpn2=dict( + preds_S=dict( + from_student=True, recorder='fpn2', connector='fpn2_s'), + preds_T=dict( + from_student=False, recorder='fpn2', + connector='fpn2_t'))))) + +find_unused_parameters = True + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=_base_.train_pipeline_stage2), + # stop distillation after the 280th epoch + dict(type='mmrazor.StopDistillHook', stop_epoch=280) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_m_rtmdet_l_neck_300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_m_rtmdet_l_neck_300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d7f9211f1f77c4d83677f7f6c485a5c6212252 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_m_rtmdet_l_neck_300e_coco.py @@ -0,0 +1,99 @@ +_base_ = '../rtmdet_m_syncbn_fast_8xb32-300e_coco.py' + +teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth' # noqa: E501 + +norm_cfg = dict(type='BN', affine=False, track_running_stats=False) + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='FpnTeacherDistill', + architecture=dict( + cfg_path='mmyolo::rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py'), + teacher=dict( + cfg_path='mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py'), + teacher_ckpt=teacher_ckpt, + distiller=dict( + type='ConfigurableDistiller', + # `recorders` are used to record various intermediate results during + # the model forward. + student_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), + ), + teacher_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), + # `connectors` are adaptive layers which usually map teacher's and + # students features to the same dimension. 
+ connectors=dict( + fpn0_s=dict( + type='ConvModuleConnector', + in_channel=192, + out_channel=256, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn0_t=dict( + type='NormConnector', in_channels=256, norm_cfg=norm_cfg), + fpn1_s=dict( + type='ConvModuleConnector', + in_channel=192, + out_channel=256, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn1_t=dict( + type='NormConnector', in_channels=256, norm_cfg=norm_cfg), + fpn2_s=dict( + type='ConvModuleConnector', + in_channel=192, + out_channel=256, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn2_t=dict( + type='NormConnector', in_channels=256, norm_cfg=norm_cfg)), + distill_losses=dict( + loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), + # `loss_forward_mappings` are mappings between distill loss forward + # arguments and records. + loss_forward_mappings=dict( + loss_fpn0=dict( + preds_S=dict( + from_student=True, recorder='fpn0', connector='fpn0_s'), + preds_T=dict( + from_student=False, recorder='fpn0', connector='fpn0_t')), + loss_fpn1=dict( + preds_S=dict( + from_student=True, recorder='fpn1', connector='fpn1_s'), + preds_T=dict( + from_student=False, recorder='fpn1', connector='fpn1_t')), + loss_fpn2=dict( + preds_S=dict( + from_student=True, recorder='fpn2', connector='fpn2_s'), + preds_T=dict( + from_student=False, recorder='fpn2', + connector='fpn2_t'))))) + +find_unused_parameters = True + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=_base_.train_pipeline_stage2), + # stop distillation after the 280th epoch + dict(type='mmrazor.StopDistillHook', stop_epoch=280) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_s_rtmdet_m_neck_300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_s_rtmdet_m_neck_300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..99b5dc5e48d04fed927cbd80c1538ca99912fc1b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_s_rtmdet_m_neck_300e_coco.py @@ -0,0 +1,99 @@ +_base_ = '../rtmdet_s_syncbn_fast_8xb32-300e_coco.py' + +teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952-40af4fe8.pth' # noqa: E501 + +norm_cfg = dict(type='BN', affine=False, track_running_stats=False) + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='FpnTeacherDistill', + architecture=dict( + cfg_path='mmyolo::rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py'), + teacher=dict( + cfg_path='mmyolo::rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py'), + teacher_ckpt=teacher_ckpt, + distiller=dict( + type='ConfigurableDistiller', + # `recorders` are used to record various intermediate results during + # the model forward. 
+ student_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), + ), + teacher_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), + # `connectors` are adaptive layers which usually map teacher's and + # students features to the same dimension. + connectors=dict( + fpn0_s=dict( + type='ConvModuleConnector', + in_channel=128, + out_channel=192, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn0_t=dict( + type='NormConnector', in_channels=192, norm_cfg=norm_cfg), + fpn1_s=dict( + type='ConvModuleConnector', + in_channel=128, + out_channel=192, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn1_t=dict( + type='NormConnector', in_channels=192, norm_cfg=norm_cfg), + fpn2_s=dict( + type='ConvModuleConnector', + in_channel=128, + out_channel=192, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn2_t=dict( + type='NormConnector', in_channels=192, norm_cfg=norm_cfg)), + distill_losses=dict( + loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), + # `loss_forward_mappings` are mappings between distill loss forward + # arguments and records. + loss_forward_mappings=dict( + loss_fpn0=dict( + preds_S=dict( + from_student=True, recorder='fpn0', connector='fpn0_s'), + preds_T=dict( + from_student=False, recorder='fpn0', connector='fpn0_t')), + loss_fpn1=dict( + preds_S=dict( + from_student=True, recorder='fpn1', connector='fpn1_s'), + preds_T=dict( + from_student=False, recorder='fpn1', connector='fpn1_t')), + loss_fpn2=dict( + preds_S=dict( + from_student=True, recorder='fpn2', connector='fpn2_s'), + preds_T=dict( + from_student=False, recorder='fpn2', + connector='fpn2_t'))))) + +find_unused_parameters = True + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=_base_.train_pipeline_stage2), + # stop distillation after the 280th epoch + dict(type='mmrazor.StopDistillHook', stop_epoch=280) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..50c23580bf6b7c1a120267a65bc7cc334513c475 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py @@ -0,0 +1,99 @@ +_base_ = '../rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py' + +teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth' # noqa: E501 + +norm_cfg = dict(type='BN', affine=False, track_running_stats=False) + +model = dict( + _delete_=True, + _scope_='mmrazor', + type='FpnTeacherDistill', + architecture=dict( + cfg_path='mmyolo::rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py'), + teacher=dict( + 
cfg_path='mmyolo::rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py'), + teacher_ckpt=teacher_ckpt, + distiller=dict( + type='ConfigurableDistiller', + # `recorders` are used to record various intermediate results during + # the model forward. + student_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), + ), + teacher_recorders=dict( + fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), + fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), + fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), + # `connectors` are adaptive layers which usually map teacher's and + # students features to the same dimension. + connectors=dict( + fpn0_s=dict( + type='ConvModuleConnector', + in_channel=96, + out_channel=128, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn0_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg), + fpn1_s=dict( + type='ConvModuleConnector', + in_channel=96, + out_channel=128, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn1_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg), + fpn2_s=dict( + type='ConvModuleConnector', + in_channel=96, + out_channel=128, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None), + fpn2_t=dict( + type='NormConnector', in_channels=128, norm_cfg=norm_cfg)), + distill_losses=dict( + loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), + loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), + # `loss_forward_mappings` are mappings between distill loss forward + # arguments and records. + loss_forward_mappings=dict( + loss_fpn0=dict( + preds_S=dict( + from_student=True, recorder='fpn0', connector='fpn0_s'), + preds_T=dict( + from_student=False, recorder='fpn0', connector='fpn0_t')), + loss_fpn1=dict( + preds_S=dict( + from_student=True, recorder='fpn1', connector='fpn1_s'), + preds_T=dict( + from_student=False, recorder='fpn1', connector='fpn1_t')), + loss_fpn2=dict( + preds_S=dict( + from_student=True, recorder='fpn2', connector='fpn2_s'), + preds_T=dict( + from_student=False, recorder='fpn2', + connector='fpn2_t'))))) + +find_unused_parameters = True + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=_base_.train_pipeline_stage2), + # stop distillation after the 280th epoch + dict(type='mmrazor.StopDistillHook', stop_epoch=280) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..704a44ba83c90d1c639d4bcbabf88b72fa867553 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/metafile.yml @@ -0,0 +1,215 @@ +Collections: + - Name: RTMDet + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Flat Cosine Annealing + Training Resources: 8x A100 GPUs + Architecture: + - CSPNeXt + - CSPNeXtPAFPN + README: configs/rtmdet/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/main/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.1.1 + - Name: Rotated_RTMDet + Metadata: + Training Data: DOTAv1.0 + 
Training Techniques: + - AdamW + - Flat Cosine Annealing + Training Resources: 1x A100 GPUs + Architecture: + - CSPNeXt + - CSPNeXtPAFPN + README: configs/rtmdet/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/main/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.1.1 + +Models: + - Name: rtmdet_tiny_syncbn_fast_8xb32-300e_coco + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 11.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth + + - Name: kd_tiny_rtmdet_s_neck_300e_coco + In Collection: RTMDet + Config: configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py + Metadata: + Training Memory (GB): 11.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.8 + Weights: https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_tiny_rtmdet_s_neck_300e_coco/kd_tiny_rtmdet_s_neck_300e_coco_20230213_104240-e1e4197c.pth + + - Name: rtmdet_s_syncbn_fast_8xb32-300e_coco + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 15.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.6 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth + + - Name: kd_s_rtmdet_m_neck_300e_coco + In Collection: RTMDet + Config: configs/rtmdet/distillation/kd_s_rtmdet_m_neck_300e_coco.py + Metadata: + Training Memory (GB): 16.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.7 + Weights: https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_s_rtmdet_m_neck_300e_coco/kd_s_rtmdet_m_neck_300e_coco_20230220_140647-446ff003.pth + + - Name: rtmdet_m_syncbn_fast_8xb32-300e_coco + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 27.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952-40af4fe8.pth + + - Name: kd_m_rtmdet_l_neck_300e_coco + In Collection: RTMDet + Config: configs/rtmdet/distillation/kd_m_rtmdet_l_neck_300e_coco.py + Metadata: + Training Memory (GB): 29.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.2 + Weights: https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_m_rtmdet_l_neck_300e_coco/kd_m_rtmdet_l_neck_300e_coco_20230220_141313-b806f503.pth + + - Name: rtmdet_l_syncbn_fast_8xb32-300e_coco + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 43.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.4 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth + + - Name: kd_l_rtmdet_x_neck_300e_coco + In Collection: RTMDet + Config: configs/rtmdet/distillation/kd_l_rtmdet_x_neck_300e_coco.py + Metadata: + Training Memory (GB): 45.2 + Epochs: 300 + 
Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.3 + Weights: https://download.openmmlab.com/mmrazor/v1/rtmdet_distillation/kd_l_rtmdet_x_neck_300e_coco/kd_l_rtmdet_x_neck_300e_coco_20230220_141912-c9979722.pth + + - Name: rtmdet_x_syncbn_fast_8xb32-300e_coco + In Collection: RTMDet + Config: configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 63.4 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.8 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth + + - Name: rtmdet-r_tiny_fast_1xb8-36e_dota + In Collection: Rotated_RTMDet + Config: configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py + Metadata: + Training Memory (GB): 12.7 + Epochs: 36 + Results: + - Task: Oriented Object Detection + Dataset: DOTAv1.0 + Metrics: + mAP: 75.07 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota/rtmdet-r_tiny_fast_1xb8-36e_dota_20230228_162210-e8ccfb1c.pth + + - Name: rtmdet-r_s_fast_1xb8-36e_dota + In Collection: Rotated_RTMDet + Config: configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota.py + Metadata: + Training Memory (GB): 16.6 + Epochs: 36 + Results: + - Task: Oriented Object Detection + Dataset: DOTAv1.0 + Metrics: + mAP: 77.33 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota/rtmdet-r_s_fast_1xb8-36e_dota_20230224_110307-3946a5aa.pth + + - Name: rtmdet-r_m_syncbn_fast_2xb4-36e_dota + In Collection: Rotated_RTMDet + Config: configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py + Metadata: + Training Resources: 2x A100 GPUs + Training Memory (GB): 10.9 + Epochs: 36 + Results: + - Task: Oriented Object Detection + Dataset: DOTAv1.0 + Metrics: + mAP: 78.43 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota/rtmdet-r_m_syncbn_fast_2xb4-36e_dota_20230224_124237-29ae1619.pth + + - Name: rtmdet-r_l_syncbn_fast_2xb4-36e_dota + In Collection: Rotated_RTMDet + Config: configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py + Metadata: + Training Resources: 2x A100 GPUs + Training Memory (GB): 16.1 + Epochs: 36 + Results: + - Task: Oriented Object Detection + Dataset: DOTAv1.0 + Metrics: + mAP: 78.66 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota/rtmdet-r_l_syncbn_fast_2xb4-36e_dota_20230224_124544-38bc5f08.pth + + - Name: rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota + In Collection: Rotated_RTMDet + Config: configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py + Metadata: + Training Resources: 2x A100 GPUs + Training Memory (GB): 19.6 + Epochs: 100 + Results: + - Task: Oriented Object Detection + Dataset: DOTAv1.0 + Metrics: + mAP: 80.14 + Weights: https://download.openmmlab.com/mmyolo/v0/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota_20230224_124735-ed4ea966.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py new file mode 100644 index 0000000000000000000000000000000000000000..ef29a1d051b84d8c546edb3cabb958ec586e1261 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py 
@@ -0,0 +1,30 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' + +# ========================modified parameters====================== +data_root = 'data/split_ms_dota/' +# Path of test images folder +test_data_prefix = 'test/images/' +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +train_dataloader = dict(dataset=dict(data_root=data_root)) + +val_dataloader = dict(dataset=dict(data_root=data_root)) + +# Inference on val dataset +test_dataloader = val_dataloader + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. +# test_dataloader = dict( +# dataset=dict( +# data_root=data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb2ae77a370a73e463068e11291afb4a59cda02 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py @@ -0,0 +1,331 @@ +_base_ = '../../_base_/default_runtime.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth' # noqa + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/split_ss_dota/' +# Path of train annotation folder +train_ann_file = 'trainval/annfiles/' +train_data_prefix = 'trainval/images/' # Prefix of train image path +# Path of val annotation folder +val_ann_file = 'trainval/annfiles/' +val_data_prefix = 'trainval/images/' # Prefix of val image path +# Path of test images folder +test_data_prefix = 'test/images/' + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +num_classes = 15 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 4 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0. +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 1xb8=8 bs +base_lr = 0.00025 # 0.004 / 16 +max_epochs = 36 # Maximum training epochs + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # Decode rbox with angle, For RTMDet-R, Defaults to True. + # When set to True, use rbox coder such as DistanceAnglePointCoder + # When set to False, use hbox coder such as DistancePointBBoxCoder + # different setting lead to different AP. + decode_with_angle=True, + # The number of boxes before NMS + nms_pre=30000, + score_thr=0.05, # Threshold to filter out boxes. 
+ nms=dict(type='nms_rotated', iou_threshold=0.1), # NMS type and threshold + max_per_img=2000) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (1024, 1024) # width, height +# ratio for random rotate +random_rotate_ratio = 0.5 +# label ids for rect objs +rotate_rect_obj_labels = [9, 11] +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5DOTADataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 8 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 8 + +# Config of batch shapes. Only on val. Not use in RTMDet-R +batch_shapes_cfg = None + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 1.0 +# The scaling factor that controls the width of the network structure +widen_factor = 1.0 +# Strides of multi-scale prior box +strides = [8, 16, 32] +# The angle definition for model +angle_version = 'le90' # le90, le135, oc are available options + +norm_cfg = dict(type='BN') # Normalization config + +# -----train val related----- +lr_start_factor = 1.0e-5 +dsl_topk = 13 # Number of bbox selected in each level +loss_cls_weight = 1.0 +loss_bbox_weight = 2.0 +qfl_beta = 2.0 # beta of QualityFocalLoss +weight_decay = 0.05 + +# Save model checkpoint and validation intervals +save_checkpoint_intervals = 1 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + channel_attention=True, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + init_cfg=dict( + type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), + neck=dict( + type='CSPNeXtPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=256, + num_csp_blocks=3, + expand_ratio=0.5, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='RTMDetRotatedHead', + head_module=dict( + type='RTMDetRotatedSepBNHeadModule', + num_classes=num_classes, + widen_factor=widen_factor, + in_channels=256, + stacked_convs=2, + feat_channels=256, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + share_conv=True, + pred_kernel_size=1, + featmap_strides=strides), + prior_generator=dict( + type='mmdet.MlvlPointGenerator', offset=0, strides=strides), + bbox_coder=dict( + type='DistanceAnglePointCoder', angle_version=angle_version), + loss_cls=dict( + type='mmdet.QualityFocalLoss', + use_sigmoid=True, + beta=qfl_beta, + loss_weight=loss_cls_weight), + loss_bbox=dict( + type='mmrotate.RotatedIoULoss', + mode='linear', + loss_weight=loss_bbox_weight), + angle_version=angle_version, + # Used for angle encode and decode, similar to bbox coder + angle_coder=dict(type='mmrotate.PseudoAngleCoder'), + # If true, it will apply loss_bbox on horizontal box, and angle_loss + # needs to be specified. In this case the loss_bbox should use + # horizontal box loss e.g. IoULoss. 
Arg details can be seen in + # `docs/zh_cn/tutorials/rotated_detection.md` + use_hbbox_loss=False, + loss_angle=None), + train_cfg=dict( + assigner=dict( + type='BatchDynamicSoftLabelAssigner', + num_classes=num_classes, + topk=dsl_topk, + iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'), + # RBboxOverlaps2D doesn't support batch input, use loop instead. + batch_iou=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=model_test_cfg, +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'), + dict( + type='mmrotate.ConvertBoxType', + box_type_mapping=dict(gt_bboxes='rbox')), + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.RandomFlip', + prob=0.75, + direction=['horizontal', 'vertical', 'diagonal']), + dict( + type='mmrotate.RandomRotate', + prob=random_rotate_ratio, + angle_range=180, + rotate_type='mmrotate.Rotate', + rect_obj_labels=rotate_rect_obj_labels), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='RegularizeRotatedBox', angle_version=angle_version), + dict(type='mmdet.PackDetInputs') +] + +val_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='LoadAnnotations', + with_bbox=True, + box_type='qbox', + _scope_='mmdet'), + dict( + type='mmrotate.ConvertBoxType', + box_type_mapping=dict(gt_bboxes='rbox')), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + collate_fn=dict(type='yolov5_collate'), + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img_path=train_data_prefix), + filter_cfg=dict(filter_empty_gt=True), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=val_ann_file, + data_prefix=dict(img_path=val_data_prefix), + test_mode=True, + batch_shapes_cfg=batch_shapes_cfg, + pipeline=val_pipeline)) + +val_evaluator = dict(type='mmrotate.DOTAMetric', metric='mAP') + +# Inference on val dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# batch_size=val_batch_size_per_gpu, +# num_workers=val_num_workers, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# data_prefix=dict(img_path=test_data_prefix), +# test_mode=True, +# batch_shapes_cfg=batch_shapes_cfg, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=weight_decay), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=lr_start_factor, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_checkpoint_intervals, + max_keep_ckpts=max_keep_ckpts, # only keep latest 3 checkpoints + save_best='auto')) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_checkpoint_intervals) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +visualizer = dict(type='mmrotate.RotLocalVisualizer') diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..dcafa55db97ffd543af3bc382d15de361cadbd75 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py @@ -0,0 +1,168 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' + +# This config use longer schedule with Mixup, Mosaic and Random Rotate. + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth' # noqa + +# ========================modified parameters====================== + +# Base learning rate for optim_wrapper. Corresponding to 1xb8=8 bs +base_lr = 0.00025 # 0.004 / 16 +lr_start_factor = 1.0e-5 +max_epochs = 100 # Maximum training epochs +# Change train_pipeline for final 10 epochs (stage 2) +num_epochs_stage2 = 10 + +img_scale = (1024, 1024) # width, height +# ratio range for random resize +random_resize_ratio_range = (0.1, 2.0) +# Cached images number in mosaic +mosaic_max_cached_images = 40 +# Number of cached images in mixup +mixup_max_cached_images = 20 +# ratio for random rotate +random_rotate_ratio = 0.5 +# label ids for rect objs +rotate_rect_obj_labels = [9, 11] + +# Save model checkpoint and validation intervals +save_checkpoint_intervals = 1 +# validation intervals in stage 2 +val_interval_stage2 = 1 +# The maximum checkpoints to keep. 
+max_keep_ckpts = 3 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'), + dict( + type='mmrotate.ConvertBoxType', + box_type_mapping=dict(gt_bboxes='rbox')), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=mosaic_max_cached_images, + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=random_resize_ratio_range, + resize_type='mmdet.Resize', + keep_ratio=True), + dict( + type='mmrotate.RandomRotate', + prob=random_rotate_ratio, + angle_range=180, + rotate_type='mmrotate.Rotate', + rect_obj_labels=rotate_rect_obj_labels), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='mmdet.RandomFlip', + prob=0.75, + direction=['horizontal', 'vertical', 'diagonal']), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + max_cached_images=mixup_max_cached_images), + dict(type='mmdet.PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'), + dict( + type='mmrotate.ConvertBoxType', + box_type_mapping=dict(gt_bboxes='rbox')), + dict( + type='mmdet.RandomResize', + scale=img_scale, + ratio_range=random_resize_ratio_range, + resize_type='mmdet.Resize', + keep_ratio=True), + dict( + type='mmrotate.RandomRotate', + prob=random_rotate_ratio, + angle_range=180, + rotate_type='mmrotate.Rotate', + rect_obj_labels=rotate_rect_obj_labels), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict( + type='mmdet.RandomFlip', + prob=0.75, + direction=['horizontal', 'vertical', 'diagonal']), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=lr_start_factor, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_checkpoint_intervals, + max_keep_ckpts=max_keep_ckpts, # only keep latest 3 checkpoints + save_best='auto')) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - num_epochs_stage2, + switch_pipeline=train_pipeline_stage2) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_checkpoint_intervals, + dynamic_intervals=[(max_epochs - num_epochs_stage2, val_interval_stage2)]) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_coco-pretrain_2xb4-36e_dota-ms.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_coco-pretrain_2xb4-36e_dota-ms.py new file mode 100644 index 0000000000000000000000000000000000000000..1a9f50cdded21c36f9b76b49e291b60e0a2dff07 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_coco-pretrain_2xb4-36e_dota-ms.py @@ -0,0 +1,20 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth' # noqa + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. +# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota-ms.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota-ms.py new file mode 100644 index 0000000000000000000000000000000000000000..4be8605f6de383c4e39edae6cfdc19f5ea005353 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota-ms.py @@ -0,0 +1,33 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..8df61cffd6e165e36965b2622735abb93fbe8d83 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py @@ -0,0 +1,33 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. +# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota-ms.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota-ms.py new file mode 100644 index 0000000000000000000000000000000000000000..2b7b0b6ffee9cdf2720696ce6fe51b87927ada6e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota-ms.py @@ -0,0 +1,38 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.5 + +# Batch size of a single GPU during training +train_batch_size_per_gpu = 8 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..d200dd76491dafb306900de23a25359224205d13 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota.py @@ -0,0 +1,38 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.5 + +# Batch size of a single GPU during training +train_batch_size_per_gpu = 8 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota-ms.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota-ms.py new file mode 100644 index 0000000000000000000000000000000000000000..56bf038b6500bb0640160e680ddbb5e4c34fd3f8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota-ms.py @@ -0,0 +1,38 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.167 +widen_factor = 0.375 + +# Batch size of a single GPU during training +train_batch_size_per_gpu = 8 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..739a2de8020ad6879a8401255395df2e807f66c4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py @@ -0,0 +1,38 @@ +_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' + +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.167 +widen_factor = 0.375 + +# Batch size of a single GPU during training +train_batch_size_per_gpu = 8 + +# Submission dir for result submit +submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) + +# Inference on test dataset and format the output results +# for submission. Note: the test set has no annotation. 
+# test_dataloader = dict( +# dataset=dict( +# data_root=_base_.data_root, +# ann_file='', # test set has no annotation +# data_prefix=dict(img_path=_base_.test_data_prefix), +# pipeline=_base_.test_pipeline)) +# test_evaluator = dict( +# type='mmrotate.DOTAMetric', +# format_only=True, +# merge_patches=True, +# outfile_prefix=submission_dir) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet-ins_s_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet-ins_s_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..279a7990bc4a58a5c10bfc3dd29e570c7e3a14cc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet-ins_s_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,31 @@ +_base_ = './rtmdet_s_syncbn_fast_8xb32-300e_coco.py' + +widen_factor = 0.5 + +model = dict( + bbox_head=dict( + type='RTMDetInsSepBNHead', + head_module=dict( + type='RTMDetInsSepBNHeadModule', + use_sigmoid_cls=True, + widen_factor=widen_factor), + loss_mask=dict( + type='mmdet.DiceLoss', loss_weight=2.0, eps=5e-6, + reduction='mean')), + test_cfg=dict( + multi_label=True, + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr_binary=0.5)) + +_base_.test_pipeline[-2] = dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, _scope_='mmdet') + +val_dataloader = dict(dataset=dict(pipeline=_base_.test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..c36ac38ce16db6bbd66fe0c2271c34c252a538ab --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,304 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 32 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 10 +# persistent_workers must be False if num_workers is 0. +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 8xb16=64 bs +base_lr = 0.004 +max_epochs = 300 # Maximum training epochs +# Change train_pipeline for final 20 epochs (stage 2) +num_epochs_stage2 = 20 + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # The number of boxes before NMS + nms_pre=30000, + score_thr=0.001, # Threshold to filter out boxes. 
+ nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold + max_per_img=300) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# ratio range for random resize +random_resize_ratio_range = (0.1, 2.0) +# Cached images number in mosaic +mosaic_max_cached_images = 40 +# Number of cached images in mixup +mixup_max_cached_images = 20 +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 32 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 10 + +# Config of batch shapes. Only on val. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + size_divisor=32, + extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 1.0 +# The scaling factor that controls the width of the network structure +widen_factor = 1.0 +# Strides of multi-scale prior box +strides = [8, 16, 32] + +norm_cfg = dict(type='BN') # Normalization config + +# -----train val related----- +lr_start_factor = 1.0e-5 +dsl_topk = 13 # Number of bbox selected in each level +loss_cls_weight = 1.0 +loss_bbox_weight = 2.0 +qfl_beta = 2.0 # beta of QualityFocalLoss +weight_decay = 0.05 + +# Save model checkpoint and validation intervals +save_checkpoint_intervals = 10 +# validation intervals in stage 2 +val_interval_stage2 = 1 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + bgr_to_rgb=False), + backbone=dict( + type='CSPNeXt', + arch='P5', + expand_ratio=0.5, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + channel_attention=True, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='CSPNeXtPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=256, + num_csp_blocks=3, + expand_ratio=0.5, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='RTMDetHead', + head_module=dict( + type='RTMDetSepBNHeadModule', + num_classes=num_classes, + in_channels=256, + stacked_convs=2, + feat_channels=256, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + share_conv=True, + pred_kernel_size=1, + featmap_strides=strides), + prior_generator=dict( + type='mmdet.MlvlPointGenerator', offset=0, strides=strides), + bbox_coder=dict(type='DistancePointBBoxCoder'), + loss_cls=dict( + type='mmdet.QualityFocalLoss', + use_sigmoid=True, + beta=qfl_beta, + loss_weight=loss_cls_weight), + loss_bbox=dict(type='mmdet.GIoULoss', loss_weight=loss_bbox_weight)), + train_cfg=dict( + assigner=dict( + type='BatchDynamicSoftLabelAssigner', + num_classes=num_classes, + topk=dsl_topk, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=model_test_cfg, +) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + 
dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=mosaic_max_cached_images, + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=random_resize_ratio_range, + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + max_cached_images=mixup_max_cached_images), + dict(type='mmdet.PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='mmdet.RandomResize', + scale=img_scale, + ratio_range=random_resize_ratio_range, + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='mmdet.PackDetInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + collate_fn=dict(type='yolov5_collate'), + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=val_ann_file, + data_prefix=dict(img=val_data_prefix), + test_mode=True, + batch_shapes_cfg=batch_shapes_cfg, + pipeline=test_pipeline)) + +test_dataloader = val_dataloader + +# Reduce evaluation time +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=weight_decay), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', + start_factor=lr_start_factor, + by_epoch=False, + begin=0, + end=1000), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# hooks +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_checkpoint_intervals, + 
max_keep_ckpts=max_keep_ckpts # only keep latest 3 checkpoints + )) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - num_epochs_stage2, + switch_pipeline=train_pipeline_stage2) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_checkpoint_intervals, + dynamic_intervals=[(max_epochs - num_epochs_stage2, val_interval_stage2)]) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..52576bf41689f462e46e83e6236de91ead43e97c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,11 @@ +_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8cead7805974a0a9434f41623ab92beb87fadc60 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,92 @@ +_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.5 +img_scale = _base_.img_scale + +# ratio range for random resize +random_resize_ratio_range = (0.5, 2.0) +# Number of cached images in mosaic +mosaic_max_cached_images = 40 +# Number of cached images in mixup +mixup_max_cached_images = 20 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + # Since the checkpoint includes CUDA:0 data, + # it must be forced to set map_location. + # Once checkpoint is fixed, it can be removed. 
+ init_cfg=dict( + type='Pretrained', + prefix='backbone.', + checkpoint=checkpoint, + map_location='cpu')), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=mosaic_max_cached_images, + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=random_resize_ratio_range, # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + max_cached_images=mixup_max_cached_images), + dict(type='mmdet.PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='mmdet.RandomResize', + scale=img_scale, + ratio_range=random_resize_ratio_range, # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=train_pipeline_stage2) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_fast_1xb12-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..8d1182c5ef663efdf06801c6cc22991b9545b2ea --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_fast_1xb12-40e_cat.py @@ -0,0 +1,70 @@ +_base_ = 'rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +num_epochs_stage2 = 5 + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 +val_batch_size_per_gpu = 1 +val_num_workers = 2 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict(assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + 
data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=_base_.lr_start_factor, + by_epoch=False, + begin=0, + end=30), + dict( + # use cosine lr from 150 to 300 epoch + type='CosineAnnealingLR', + eta_min=_base_.base_lr * 0.05, + begin=max_epochs // 2, + end=max_epochs, + T_max=max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +_base_.custom_hooks[1].switch_epoch = max_epochs - num_epochs_stage2 + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..257110d22e9f2330e4c5378001eaf72f6bb885d1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,58 @@ +_base_ = './rtmdet_s_syncbn_fast_8xb32-300e_coco.py' +checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.167 +widen_factor = 0.375 +img_scale = _base_.img_scale + +# ratio range for random resize +random_resize_ratio_range = (0.5, 2.0) +# Number of cached images in mosaic +mosaic_max_cached_images = 20 +# Number of cached images in mixup +mixup_max_cached_images = 10 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + init_cfg=dict(checkpoint=checkpoint)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=mosaic_max_cached_images, # note + random_pop=False, # note + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=random_resize_ratio_range, + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + random_pop=False, + max_cached_images=mixup_max_cached_images, + prob=0.5), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc9001f99ef3d468994c8201d43f08500bdeef9 --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,11 @@ +_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 1.33 +widen_factor = 1.25 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/README.md b/models/YOLO-World/third_party/mmyolo/configs/yolov5/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bd33e83f430b9309e4c0e95902a61db0dd7ae002 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/README.md @@ -0,0 +1,146 @@ +# YOLOv5 + + + +## Abstract + +YOLOv5 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + +
+ +YOLOv5-l-P5 model structure +
+ +
+ +YOLOv5-l-P6 model structure +
+ +## Results and models + +### COCO + +| Backbone | Arch | size | Mask Refine | SyncBN | AMP | Mem (GB) | box AP | TTA box AP | Config | Download | +| :-------: | :--: | :--: | :---------: | :----: | :-: | :------: | :---------: | :--------: | :-----------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOv5-n | P5 | 640 | No | Yes | Yes | 1.5 | 28.0 | 30.7 | [config](./yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739-b804c1ad.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739.log.json) | +| YOLOv5-n | P5 | 640 | Yes | Yes | Yes | 1.5 | 28.0 | | [config](./mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_152706-712fb1b2.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_152706.log.json) | +| YOLOv5u-n | P5 | 640 | Yes | Yes | Yes | | | | [config](./yolov5/yolov5u/yolov5_n_mask-refine_syncbn_fast_8xb16-300e_coco.py) | [model](<>) \| [log](<>) | +| YOLOv5-s | P5 | 640 | No | Yes | Yes | 2.7 | 37.7 | 40.2 | [config](./yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700.log.json) | +| YOLOv5-s | P5 | 640 | Yes | Yes | Yes | 2.7 | 38.0 (+0.3) | | [config](./mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230304_033134-8e0cd271.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230304_033134.log.json) | +| YOLOv5u-s | P5 | 640 | Yes | Yes | Yes | | | | [config](./yolov5/yolov5u/yolov5_s_mask-refine_syncbn_fast_8xb16-300e_coco.py) | [model](<>) \| [log](<>) | +| YOLOv5-m | P5 | 640 | No | Yes | Yes | 5.0 | 45.3 | 46.9 | [config](./yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944.log.json) | +| YOLOv5-m | P5 | 640 | Yes | Yes | 
Yes | 5.0 | 45.3 | | [config](./mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_153946-44e96155.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_153946.log.json) | +| YOLOv5u-m | P5 | 640 | Yes | Yes | Yes | | | | [config](./yolov5/yolov5u/yolov5_m_mask-refine_syncbn_fast_8xb16-300e_coco.py) | [model](<>) \| [log](<>) | +| YOLOv5-l | P5 | 640 | No | Yes | Yes | 8.1 | 48.8 | 49.9 | [config](./yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco/yolov5_l-v61_syncbn_fast_8xb16-300e_coco_20220917_031007-096ef0eb.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco/yolov5_l-v61_syncbn_fast_8xb16-300e_coco_20220917_031007.log.json) | +| YOLOv5-l | P5 | 640 | Yes | Yes | Yes | 8.1 | 49.3 (+0.5) | | [config](./mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154301-2c1d912a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154301.log.json) | +| YOLOv5u-l | P5 | 640 | Yes | Yes | Yes | | | | [config](./yolov5/yolov5u/yolov5_l_mask-refine_syncbn_fast_8xb16-300e_coco.py) | [model](<>) \| [log](<>) | +| YOLOv5-x | P5 | 640 | No | Yes | Yes | 12.2 | 50.2 | | [config](./yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco/yolov5_x-v61_syncbn_fast_8xb16-300e_coco_20230305_152943-00776a4b.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco/yolov5_x-v61_syncbn_fast_8xb16-300e_coco_20230305_152943.log.json) | +| YOLOv5-x | P5 | 640 | Yes | Yes | Yes | 12.2 | 50.9 (+0.7) | | [config](./mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154321-07edeb62.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154321.log.json) | +| YOLOv5u-x | P5 | 640 | Yes | Yes | Yes | | | | [config](./yolov5/yolov5u/yolov5_x_mask-refine_syncbn_fast_8xb16-300e_coco.py) | [model](<>) \| [log](<>) | +| YOLOv5-n | P6 | 1280 | No | Yes | Yes | 5.8 | 35.9 | | [config](./yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_224705-d493c5f3.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_224705.log.json) | +| YOLOv5-s | P6 | 1280 | No | Yes | Yes | 10.5 | 44.4 | | 
[config](./yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_215044-58865c19.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_215044.log.json) |
+| YOLOv5-m | P6 | 1280 | No | Yes | Yes | 19.1 | 51.3 | | [config](./yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_230453-49564d58.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_230453.log.json) |
+| YOLOv5-l | P6 | 1280 | No | Yes | Yes | 30.5 | 53.7 | | [config](./yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_234308-7a2ba6bf.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_234308.log.json) |
+
+**Note**:
+
+1. `fast` means that `YOLOv5DetDataPreprocessor` and `yolov5_collate` are used for data preprocessing, which is faster for training but less flexible for multitasking. We recommend the fast version configs if you only care about object detection; a config-loading sketch follows this list.
+2. `detect` means that the network input is fixed to `640x640` and the post-processing thresholds are modified.
+3. `SyncBN` means SyncBN is used, and `AMP` indicates training with mixed precision.
+4. We use 8x A100 for training, and the single-GPU batch size is 16. This is different from the official code.
+5. The performance is unstable and may fluctuate by about 0.4 mAP, and the highest-performing weight during `COCO` training of `YOLOv5` may not come from the last epoch.
+6. `TTA` means Test Time Augmentation. It performs 3 multi-scale transformations on the image, followed by 2 flipping transformations (flipped and not flipped). You only need to specify `--tta` when testing to enable it; see [TTA](https://github.com/open-mmlab/mmyolo/blob/dev/docs/en/common_usage/tta.md) for details.
+7. The performance of `Mask Refine` training corresponds to the weights officially released by YOLOv5. `Mask Refine` means refining the bbox by the mask while loading annotations and transforming after `YOLOv5RandomAffine`; `Copy Paste` means using `YOLOv5CopyPaste`.
+8. `YOLOv5u` models use the same loss functions and split Detect head as `YOLOv8` models for improved performance, but require only 300 epochs.
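+
+A minimal sketch of how one of the configs in the table above can be loaded and lightly overridden before training, assuming `mmengine` and `mmyolo` are installed and the path is relative to the mmyolo repository root (the exact keys available depend on the chosen config):
+
+```python
+from mmengine.config import Config
+
+# Load the YOLOv5-s config from the table; `_base_` files are resolved automatically.
+cfg = Config.fromfile('configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py')
+
+# Inspect a few frequently modified parameters ...
+print(cfg.model.bbox_head.head_module.num_classes)  # 80 classes for COCO
+print(cfg.train_dataloader.batch_size)
+
+# ... and override them in place, e.g. for a smaller single-GPU run.
+cfg.train_dataloader.batch_size = 8
+cfg.default_hooks.checkpoint.max_keep_ckpts = 1
+cfg.dump('yolov5_s_custom.py')  # write the fully resolved config to disk
+```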
+ +### COCO Instance segmentation + +| Backbone | Arch | size | SyncBN | AMP | Mem (GB) | Box AP | Mask AP | Config | Download | +| :-------------------: | :--: | :--: | :----: | :-: | :------: | :----: | :-----: | :--------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOv5-n | P5 | 640 | Yes | Yes | 3.3 | 27.9 | 23.7 | [config](./ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_104807-84cc9240.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_104807.log.json) | +| YOLOv5-s | P5 | 640 | Yes | Yes | 4.8 | 38.1 | 32.0 | [config](./ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance_20230426_012542-3e570436.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance_20230426_012542.log.json) | +| YOLOv5-s(non-overlap) | P5 | 640 | Yes | Yes | 4.8 | 38.0 | 32.1 | [config](./ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance_20230424_104642-6780d34e.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance_20230424_104642.log.json) | +| YOLOv5-m | P5 | 640 | Yes | Yes | 7.3 | 45.1 | 37.3 | [config](./ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_111529-ef5ba1a9.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_111529.log.json) | +| YOLOv5-l | P5 | 640 | Yes | Yes | 10.7 | 48.8 | 39.9 | [config](./ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_104049-daa09f70.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_104049.log.json) | +| YOLOv5-x | P5 | 640 | Yes 
| Yes | 15.0 | 50.6 | 41.4 | [config](./ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_103925-a260c798.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_103925.log.json) |
+
+**Note**:
+
+1. `Non-overlap` refers to the instance-level masks being stored in the format (num_instances, h, w) instead of (h, w). Storing masks in the overlap format consumes less CPU and GPU memory.
+2. For the M model, the `affine_scale` parameter should be 0.9, but for some reason we set it to 0.5 and found that the mAP did not change. Therefore, the released M model has an `affine_scale` parameter of 0.5, which is inconsistent with the value of 0.9 in the configuration.
+
+### VOC
+
+| Backbone | size | Batchsize | AMP | Mem (GB) | box AP(COCO metric) | Config | Download |
+| :------: | :--: | :-------: | :-: | :------: | :-----------------: | :-------------------------------------------------------: | :----------------------------------------------------------------------------------------------: |
+| YOLOv5-n | 512 | 64 | Yes | 3.5 | 51.2 | [config](./yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_fast_1xb64-50e_voc/yolov5_n-v61_fast_1xb64-50e_voc_20221017_234254-f1493430.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_fast_1xb64-50e_voc/yolov5_n-v61_fast_1xb64-50e_voc_20221017_234254.log.json) |
+| YOLOv5-s | 512 | 64 | Yes | 6.5 | 62.7 | [config](./yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_fast_1xb64-50e_voc/yolov5_s-v61_fast_1xb64-50e_voc_20221017_234156-0009b33e.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_fast_1xb64-50e_voc/yolov5_s-v61_fast_1xb64-50e_voc_20221017_234156.log.json) |
+| YOLOv5-m | 512 | 64 | Yes | 12.0 | 70.1 | [config](./yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_fast_1xb64-50e_voc/yolov5_m-v61_fast_1xb64-50e_voc_20221017_114138-815c143a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_fast_1xb64-50e_voc/yolov5_m-v61_fast_1xb64-50e_voc_20221017_114138.log.json) |
+| YOLOv5-l | 512 | 32 | Yes | 10.0 | 73.1 | [config](./yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_fast_1xb32-50e_voc/yolov5_l-v61_fast_1xb32-50e_voc_20221017_045500-edc7e0d8.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_fast_1xb32-50e_voc/yolov5_l-v61_fast_1xb32-50e_voc_20221017_045500.log.json) |
+
+**Note**:
+
+1. Training on the VOC dataset requires a model pretrained on COCO (see the sketch after this list).
+2. The performance is unstable and may fluctuate by about 0.4 mAP.
+3. The official YOLOv5 uses the COCO metric while training on the VOC dataset.
+4. We converted the VOC test dataset to COCO format offline and reproduced the mAP results shown above. We will support using the COCO metric directly while training on the VOC dataset in a later version.
+5. Hyperparameters are referenced from `https://wandb.ai/glenn-jocher/YOLOv5_VOC_official`.
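+
+As an illustrative sketch of note 1 above, a VOC config can start from a COCO-pretrained checkpoint via `load_from`, mirroring the fine-tuning configs elsewhere in this diff (e.g. `rtmdet_tiny_fast_1xb12-40e_cat.py`); the `_base_` path below is an assumption, while the checkpoint URL is the COCO-pretrained YOLOv5-s weight from the COCO table above:
+
+```python
+# Illustrative sketch only: fine-tune YOLOv5-s on VOC starting from COCO weights.
+_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'  # assumed relative path
+
+load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa
+```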
+
+### CrowdHuman
+
+Since the `iscrowd` annotation of the COCO dataset is not equivalent to `ignore`, we use the CrowdHuman dataset to verify that the YOLOv5 ignore logic is correct.
+
+| Backbone | size | SyncBN | AMP | Mem (GB) | ignore_iof_thr | box AP50(CrowdHuman Metric) | MR | JI | Config | Download |
+| :------: | :--: | :----: | :-: | :------: | :------------: | :-------------------------: | :--: | :---: | :------------------------------------------------------------------------: | :------: |
+| YOLOv5-s | 640 | Yes | Yes | 2.6 | -1 | 85.79 | 48.7 | 75.33 | [config](./yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py) | |
+| YOLOv5-s | 640 | Yes | Yes | 2.6 | 0.5 | 86.17 | 48.8 | 75.87 | [config](./yolov5/crowdhuman/yolov5_s-v61_8xb16-300e_ignore_crowdhuman.py) | |
+
+**Note**:
+
+1. `ignore_iof_thr = -1` indicates that the ignore tag is not considered. We tried `ignore_iof_thr` thresholds of 0.5, 0.8, and 0.9, and the results show that 0.5 performs best.
+2. The above table shows the performance of the best model on the validation set. The best-performing models appear around epoch 160+, which means there is no need to train for so many epochs.
+3. This is a very simple implementation that simply replaces the COCO anchors using the `tools/analysis_tools/optimize_anchors.py` script. We'll adjust other parameters later to improve performance.
+
+## Citation
+
+```latex
+@software{glenn_jocher_2022_7002879,
+  author       = {Glenn Jocher and
+                  Ayush Chaurasia and
+                  Alex Stoken and
+                  Jirka Borovec and
+                  NanoCode012 and
+                  Yonghye Kwon and
+                  TaoXie and
+                  Kalen Michael and
+                  Jiacong Fang and
+                  imyhxy and
+                  Lorna and
+                  Colin Wong and
+                  曾逸夫(Zeng Yifu) and
+                  Abhiram V and
+                  Diego Montes and
+                  Zhiqiang Wang and
+                  Cristi Fati and
+                  Jebastin Nadar and
+                  Laughing and
+                  UnglvKitDe and
+                  tkianai and
+                  yxNONG and
+                  Piotr Skalski and
+                  Adam Hogan and
+                  Max Strobel and
+                  Mrinal Jain and
+                  Lorenzo Mammana and
+                  xylieong},
+  title        = {{ultralytics/yolov5: v6.2 - YOLOv5 Classification
+                   Models, Apple M1, Reproducibility, ClearML and
+                   Deci.ai integrations}},
+  month        = aug,
+  year         = 2022,
+  publisher    = {Zenodo},
+  version      = {v6.2},
+  doi          = {10.5281/zenodo.7002879},
+  url          = {https://doi.org/10.5281/zenodo.7002879}
+}
+```
diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_8xb16-300e_ignore_crowdhuman.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_8xb16-300e_ignore_crowdhuman.py
new file mode 100644
index 0000000000000000000000000000000000000000..85b371929acd68bfd06cc257d20978c3fcc36db7
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_8xb16-300e_ignore_crowdhuman.py
@@ -0,0 +1,63 @@
+_base_ = 'yolov5_s-v61_fast_8xb16-300e_crowdhuman.py'
+
+model = dict(
+    data_preprocessor=dict(
+        _delete_=True,
+        type='mmdet.DetDataPreprocessor',
+        mean=[0., 0., 0.],
+        std=[255., 255., 255.],
+        bgr_to_rgb=True),
+    bbox_head=dict(ignore_iof_thr=0.5))
+
+img_scale = _base_.img_scale
+
+albu_train_transforms = [
+    dict(type='Blur', p=0.01),
+    dict(type='MedianBlur', p=0.01),
+    dict(type='ToGray', p=0.01),
+    dict(type='CLAHE', p=0.01)
+]
+
+pre_transform = [
+    dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
+    # only change this
+    dict(type='mmdet.LoadAnnotations', with_bbox=True)
+]
+
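+# As noted in the CrowdHuman section of the YOLOv5 README above, `ignore_iof_thr=0.5`
+# (set in the bbox_head of this config) performed best among the tried values of
+# 0.5, 0.8 and 0.9, while -1 (as in the non-ignore config) leaves the ignore tag unused.
+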
+train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(0.5, 1.5), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + collate_fn=dict(type='pseudo_collate'), + dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py new file mode 100644 index 0000000000000000000000000000000000000000..a61859fa0f2c0ea8a08ffd7783adc4ccac8540dd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py @@ -0,0 +1,47 @@ +_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# Use the model trained on the COCO as the pretrained model +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa + +# dataset settings +data_root = 'data/CrowdHuman/' +dataset_type = 'YOLOv5CrowdHumanDataset' + +# parameters that often need to be modified +num_classes = 1 + +anchors = [ + [(6, 14), (12, 28), (19, 48)], # P3/8 + [(29, 79), (46, 124), (142, 54)], # P4/16 + [(73, 198), (124, 330), (255, 504)] # P5/32 +] + +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors))) + +train_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotation_train.odgt', + data_prefix=dict(img='Images/'))) + +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='annotation_val.odgt', + data_prefix=dict(img='Images/'), + # CrowdHumanMetric does not support out-of-order output images + # for the time being. batch_shapes_cfg does not support. + batch_shapes_cfg=None)) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type='mmdet.CrowdHumanMetric', + ann_file=data_root + 'annotation_val.odgt', + metric=['AP', 'MR', 'JI']) +test_evaluator = val_evaluator diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..6b27c7647bd233172e11df8e5a736946d70acfe0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py @@ -0,0 +1,81 @@ +_base_ = './yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +# This config use refining bbox and `YOLOv5CopyPaste`. 
+# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` +# ========================modified parameters====================== +deepen_factor = 1.0 +widen_factor = 1.0 + +mixup_prob = 0.1 +copypaste_prob = 0.1 + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + max_aspect_ratio=_base_.max_aspect_ratio, + use_mask_refine=_base_.use_mask2refine), +] + +# enable mixup +train_pipeline = [ + *pre_transform, + *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + # TODO: support mask transform in albu + # Geometric transformations are not supported in albu now. + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='Polygon2Mask', + downsample_ratio=_base_.downsample_ratio, + mask_overlap=_base_.mask_overlap), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..831e815cb2f982e92c9995bd6e012bcce95950f6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py @@ -0,0 +1,89 @@ +_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +lr_factor = 0.1 +loss_cls_weight = 0.3 +loss_obj_weight = 0.7 + +affine_scale = 0.9 +mixup_prob = 0.1 + +# =======================Unmodified in most cases================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 
640)**2 * 3 / num_det_layers)))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + max_aspect_ratio=_base_.max_aspect_ratio, + use_mask_refine=_base_.use_mask2refine), +] + +# enable mixup +train_pipeline = [ + *pre_transform, + *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + # TODO: support mask transform in albu + # Geometric transformations are not supported in albu now. + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='Polygon2Mask', + downsample_ratio=_base_.downsample_ratio, + mask_overlap=_base_.mask_overlap), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..e06130bd317dba004a7fa1d5de0750f5b1cd21cf --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance.py @@ -0,0 +1,15 @@ +_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +deepen_factor = 0.33 +widen_factor = 0.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..82e2ae6d059df466940fc3df84ce53102ffec081 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py @@ -0,0 +1,42 @@ +_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +data_root = 'data/balloon/' +# Path of train annotation file +train_ann_file = 'train.json' +train_data_prefix = 'train/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'val.json' +val_data_prefix = 'val/' # Prefix of val image path +metainfo = { + 'classes': ('balloon', ), + 'palette': [ + (220, 20, 60), + ] +} +num_classes = 1 + +train_batch_size_per_gpu = 4 +train_num_workers = 2 +log_interval = 1 +##################### +train_dataloader = 
dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=train_data_prefix), + ann_file=train_ann_file)) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file)) +test_dataloader = val_dataloader +val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = val_evaluator +default_hooks = dict(logger=dict(interval=log_interval)) +##################### + +model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab980ca7dfdd9c2feaba660f8745c92b49e6bbc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py @@ -0,0 +1,126 @@ +_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' # noqa + +# ========================modified parameters====================== +# YOLOv5RandomAffine +use_mask2refine = True +max_aspect_ratio = 100 +min_area_ratio = 0.01 +# Polygon2Mask +downsample_ratio = 4 +mask_overlap = True +# LeterResize +# half_pad_param: if set to True, left and right pad_param will +# be given by dividing padding_h by 2. If set to False, pad_param is +# in int format. We recommend setting this to False for object +# detection tasks, and True for instance segmentation tasks. +# Default to False. +half_pad_param = True + +# Testing take a long time due to model_test_cfg. +# If you want to speed it up, you can increase score_thr +# or decraese nms_pre and max_per_img +model_test_cfg = dict( + multi_label=True, + nms_pre=30000, + min_bbox_size=0, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=300, + mask_thr_binary=0.5, + # fast_test: Whether to use fast test methods. When set + # to False, the implementation here is the same as the + # official, with higher mAP. If set to True, mask will first + # be upsampled to origin image shape through Pytorch, and + # then use mask_thr_binary to determine which pixels belong + # to the object. If set to False, will first use + # mask_thr_binary to determine which pixels belong to the + # object , and then use opencv to upsample mask to origin + # image shape. Default to False. 
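+    # In short: fast_test=True trades the slightly higher mask AP of the
+    # official (fast_test=False) post-processing for faster inference, which
+    # is why it is enabled in this config.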
+ fast_test=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + bbox_head=dict( + type='YOLOv5InsHead', + head_module=dict( + type='YOLOv5InsHeadModule', mask_channels=32, proto_channels=256), + mask_overlap=mask_overlap, + loss_mask=dict( + type='mmdet.CrossEntropyLoss', use_sigmoid=True, reduction='none'), + loss_mask_weight=0.05), + test_cfg=model_test_cfg) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=use_mask2refine) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + max_aspect_ratio=max_aspect_ratio, + use_mask_refine=use_mask2refine), + # TODO: support mask transform in albu + # Geometric transformations are not supported in albu now. + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='Polygon2Mask', + downsample_ratio=downsample_ratio, + mask_overlap=mask_overlap), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=False, + half_pad_param=half_pad_param, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +val_evaluator = dict(metric=['bbox', 'segm']) +test_evaluator = val_evaluator diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..83b48cab69ade156f69864d11b37af597dd82da2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance.py @@ -0,0 +1,49 @@ +_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +# ========================modified parameters====================== +mask_overlap = False # Polygon2Mask + +# ===============================Unmodified in most cases==================== +model = dict(bbox_head=dict(mask_overlap=mask_overlap)) + +train_pipeline = [ + *_base_.pre_transform, + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + 
dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + max_aspect_ratio=_base_.max_aspect_ratio, + use_mask_refine=True), + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes', + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='Polygon2Mask', + downsample_ratio=_base_.downsample_ratio, + mask_overlap=mask_overlap), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..a18170ccc30c541f583ca3f4eaf829b853ed2816 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance.py @@ -0,0 +1,15 @@ +_base_ = './yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +deepen_factor = 1.33 +widen_factor = 1.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..206eec3c41542958ae105764fbf3991935b30bc8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,77 @@ +_base_ = './yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py' + +# This config use refining bbox and `YOLOv5CopyPaste`. 
+# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 1.0 +widen_factor = 1.0 + +mixup_prob = 0.1 +copypaste_prob = 0.1 + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine), + dict(type='RemoveDataElement', keys=['gt_masks']) +] + +# enable mixup and copypaste +train_pipeline = [ + *pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4af27a917e6113f33ff72781eeee911381bbed53 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,86 @@ +_base_ = './yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +lr_factor = 0.1 +loss_cls_weight = 0.3 +loss_obj_weight = 0.7 + +affine_scale = 0.9 +mixup_prob = 0.1 + +# =======================Unmodified in most cases================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + 
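+# Note on the loss weights above: the base values (loss_cls_weight=0.3,
+# loss_obj_weight=0.7) are defined for the 80-class, 3-output-layer, 640px
+# COCO baseline, so they are rescaled by
+#   num_classes / 80 * 3 / num_det_layers            (classification)
+#   (img_scale[0] / 640) ** 2 * 3 / num_det_layers   (objectness)
+# With the defaults inherited here (80 classes, 3 layers, 640x640 input) both
+# factors equal 1, leaving the effective weights at 0.3 and 0.7.
+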
+pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine), + dict(type='RemoveDataElement', keys=['gt_masks']) +] + +# enable mixup +train_pipeline = [ + *pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe8dc32ceaf687940596f6b8094d79857921deb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,20 @@ +_base_ = './yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.25 + +# ===============================Unmodified in most cases==================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..74febbb7764435d7ab4d9a8014fb6977a269da68 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,62 @@ +_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +use_mask2refine = True +min_area_ratio = 0.01 # YOLOv5RandomAffine + +# ===============================Unmodified in most cases==================== +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + 
type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=use_mask2refine) +] + +last_transform = [ + # Delete gt_masks to avoid more computation + dict(type='RemoveDataElement', keys=['gt_masks']), + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), + *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fb76f1057872d81f52ac9369a689545194a61bb7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py' + +# This config use refining bbox and `YOLOv5CopyPaste`. 
+# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 1.33 +widen_factor = 1.25 + +# ===============================Unmodified in most cases==================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/yolov5/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..bfe5add4fa0f268a8a6566c7ddc2e9b46a92ffe7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/metafile.yml @@ -0,0 +1,346 @@ +Collections: + - Name: YOLOv5 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - AMP + - Synchronize BN + Training Resources: 8x A100 GPUs + Architecture: + - CSPDarkNet + - PAFPN + README: configs/yolov5/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.1.0/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.1.0 + - Name: YOLOv5_VOC + Metadata: + Training Data: VOC + Training Techniques: + - SGD with Nesterov + - Weight Decay + - AMP + Training Resources: 1x A100 GPU + Architecture: + - CSPDarkNet + - PAFPN + README: configs/yolov5/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.1.0/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.1.0 + +Models: + - Name: yolov5_n-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 1.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 28.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739-b804c1ad.pth + - Name: yolov5_s-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 2.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth + - Name: yolov5_m-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth + - Name: yolov5_l-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 8.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.8 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco/yolov5_l-v61_syncbn_fast_8xb16-300e_coco_20220917_031007-096ef0eb.pth + - Name: yolov5_x-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: 
configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.2 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco/yolov5_x-v61_syncbn_fast_8xb16-300e_coco_20230305_152943-00776a4b.pth + - Name: yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 5.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 35.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_224705-d493c5f3.pth + - Name: yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 10.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.4 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_215044-58865c19.pth + - Name: yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 19.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_230453-49564d58.pth + - Name: yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 30.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 53.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco_20221027_234308-7a2ba6bf.pth + - Name: yolov5_n-v61_fast_1xb64-50e_voc + In Collection: YOLOv5_VOC + Config: configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py + Metadata: + Training Memory (GB): 3.5 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.2 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_fast_1xb64-50e_voc/yolov5_n-v61_fast_1xb64-50e_voc_20221017_234254-f1493430.pth + - Name: yolov5_s-v61_fast_1xb64-50e_voc + In Collection: YOLOv5_VOC + Config: configs/yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py + Metadata: + Training Memory (GB): 6.5 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 62.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_fast_1xb64-50e_voc/yolov5_s-v61_fast_1xb64-50e_voc_20221017_234156-0009b33e.pth + - Name: yolov5_m-v61_fast_1xb64-50e_voc + In Collection: YOLOv5_VOC + Config: configs/yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py + Metadata: + Training Memory (GB): 12.0 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 70.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_fast_1xb64-50e_voc/yolov5_m-v61_fast_1xb64-50e_voc_20221017_114138-815c143a.pth + - Name: yolov5_l-v61_fast_1xb32-50e_voc + In Collection: YOLOv5_VOC + Config: 
configs/yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py + Metadata: + Training Memory (GB): 10.0 + Epochs: 50 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 73.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_fast_1xb32-50e_voc/yolov5_l-v61_fast_1xb32-50e_voc_20221017_045500-edc7e0d8.pth + - Name: yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 1.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 28.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_n_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_152706-712fb1b2.pth + - Name: yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 2.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_s_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230304_033134-8e0cd271.pth + - Name: yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 5.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_m_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_153946-44e96155.pth + - Name: yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 8.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_l_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154301-2c1d912a.pth + - Name: yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco + In Collection: YOLOv5 + Config: configs/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/mask_refine/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco/yolov5_x_mask-refine-v61_syncbn_fast_8xb16-300e_coco_20230305_154321-07edeb62.pth + - Name: yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 3.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 27.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 23.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_n-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_104807-84cc9240.pth + - Name: 
yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance_20230426_012542-3e570436.pth + - Name: yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 4.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance/yolov5_ins_s-v61_syncbn_fast_non_overlap_8xb16-300e_coco_instance_20230424_104642-6780d34e.pth + - Name: yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 7.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_m-v61_syncbn_fast_8xb16-300e_coco_instance_20230424_111529-ef5ba1a9.pth + - Name: yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_l-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_104049-daa09f70.pth + - Name: yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance + In Collection: YOLOv5 + Config: configs/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance.py + Metadata: + Training Memory (GB): 15.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.4 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov5/ins_seg/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance/yolov5_ins_x-v61_syncbn_fast_8xb16-300e_coco_instance_20230508_103925-a260c798.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..4b470973c46073748803bac2f736eca615e3cb00 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py @@ -0,0 +1,25 @@ +_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py' + +deepen_factor = 1.0 +widen_factor = 1.0 +train_batch_size_per_gpu = 32 
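+# deepen_factor/widen_factor = 1.0 switch the inherited s-sized model to the
+# l-sized backbone and neck; the per-GPU batch size is halved relative to the
+# s baseline (64), presumably to keep the larger model within GPU memory.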
+train_num_workers = 8 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco/yolov5_l-v61_syncbn_fast_8xb16-300e_coco_20220917_031007-096ef0eb.pth' # noqa + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, num_workers=train_num_workers) + +optim_wrapper = dict( + optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..2ed2127a19854fde1b6fa0c80f4d6fd2ba818f0a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py @@ -0,0 +1,17 @@ +_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py' + +deepen_factor = 0.67 +widen_factor = 0.75 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth' # noqa + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..041f6537d03a4f13402b1bb7e2665443793e4681 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py @@ -0,0 +1,17 @@ +_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py' + +deepen_factor = 0.33 +widen_factor = 0.25 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739-b804c1ad.pth' # noqa + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..f777fff9697dfbd315a0b8f762a2bf31a1118ca8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py @@ -0,0 +1,270 @@ +_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# dataset settings +data_root = 'data/VOCdevkit/' +dataset_type = 'YOLOv5VOCDataset' + +# parameters that often need to be modified +num_classes = 20 +img_scale = (512, 512) # width, height +max_epochs = 50 +train_batch_size_per_gpu = 64 +train_num_workers = 8 +val_batch_size_per_gpu = 1 +val_num_workers = 2 + +# persistent_workers must be False if num_workers is 0. 
+persistent_workers = True + +lr_factor = 0.15135 +affine_scale = 0.75544 + +# only on Val +batch_shapes_cfg = dict(img_size=img_scale[0]) + +anchors = [[(26, 44), (67, 57), (61, 130)], [(121, 118), (120, 239), + (206, 182)], + [(376, 161), (234, 324), (428, 322)]] +num_det_layers = 3 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa + +tta_img_scales = [img_scale, (416, 416), (640, 640)] + +# Hyperparameter reference from: +# https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.VOC.yaml +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors), + loss_cls=dict( + loss_weight=0.21638 * (num_classes / 80 * 3 / num_det_layers), + class_weight=0.5), + loss_bbox=dict(loss_weight=0.02 * (3 / num_det_layers)), + loss_obj=dict( + loss_weight=0.51728 * + ((img_scale[0] / 640)**2 * 3 / num_det_layers), + class_weight=0.67198), + # Different from COCO + prior_match_thr=3.3744), + test_cfg=dict(nms=dict(iou_threshold=0.6))) + +albu_train_transforms = _base_.albu_train_transforms +pre_transform = _base_.pre_transform + +with_mosiac_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.04591, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + dict( + type='YOLOv5MixUp', + prob=0.04266, + pre_transform=[ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.04591, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)) + ]) +] + +without_mosaic_pipeline = [ + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.04591, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + border=(0, 0), + border_val=(114, 114, 114)), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114)) +] + +# Because the border parameter is inconsistent when +# using mosaic or not, `RandomChoice` is used here. 
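+# `RandomChoice` samples one branch per image according to `prob`, so roughly
+# 86% of samples take the mosaic branch and 14% the plain affine + letterbox
+# branch below.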
+randchoice_mosaic_pipeline = dict( + type='RandomChoice', + transforms=[with_mosiac_pipeline, without_mosaic_pipeline], + prob=[0.85834, 0.14166]) + +train_pipeline = [ + *pre_transform, randchoice_mosaic_pipeline, + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict( + type='YOLOv5HSVRandomAug', + hue_delta=0.01041, + saturation_delta=0.54703, + value_delta=0.27739), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + _delete_=True, + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline), + dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + ], + # Use ignore_keys to avoid judging metainfo is + # not equal in `ConcatDataset`. + ignore_keys='dataset_type'), + collate_fn=dict(type='yolov5_collate')) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='VOC2007/ImageSets/Main/test.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + test_mode=True, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + optimizer=dict( + lr=0.00334, + momentum=0.74832, + weight_decay=0.00025, + batch_size_per_gpu=train_batch_size_per_gpu)) + +default_hooks = dict( + param_scheduler=dict( + lr_factor=lr_factor, + max_epochs=max_epochs, + warmup_epochs=3.3835, + warmup_momentum=0.59462, + warmup_bias_lr=0.18657)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + # To load COCO pretrained model, need to set `strict_load=False` + strict_load=False, + priority=49) +] + +# TODO: Support using coco metric in voc dataset +val_evaluator = dict( + _delete_=True, type='mmdet.VOCMetric', metric='mAP', eval_mode='area') + +test_evaluator = val_evaluator + +train_cfg = dict(max_epochs=max_epochs) + +# Config for Test Time Augmentation. 
(TTA) +_multiscale_resize_transforms = [ + dict( + type='Compose', + transforms=[ + dict(type='YOLOv5KeepRatioResize', scale=s), + dict( + type='LetterResize', + scale=s, + allow_scale_up=False, + pad_val=dict(img=114)) + ]) for s in tta_img_scales +] + +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + _multiscale_resize_transforms, + [ + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) + ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'flip', + 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc4d79f86b40c45d3f7692f32adc88295bbb4a4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py @@ -0,0 +1,26 @@ +_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py' + +deepen_factor = 1.33 +widen_factor = 1.25 +train_batch_size_per_gpu = 32 +train_num_workers = 8 + +# TODO: need to add pretrained_model +load_from = None + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, num_workers=train_num_workers) + +optim_wrapper = dict( + optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6a84fdbebc11dd4eafadc34be1e98bfb6f9b2f43 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,15 @@ +_base_ = './yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py' + +deepen_factor = 1.0 +widen_factor = 1.0 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..60a11a375c3dd8ead1d3f6a04340aed2acb20b20 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,15 @@ +_base_ = './yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py' + +deepen_factor = 1.0 +widen_factor = 1.0 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py 
b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f593e378a9fbbf1381e48a186a645a559b1f129a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,79 @@ +_base_ = './yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +lr_factor = 0.1 +affine_scale = 0.9 +loss_cls_weight = 0.3 +loss_obj_weight = 0.7 +mixup_prob = 0.1 + +# =======================Unmodified in most cases================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +# enable mixup +train_pipeline = [ + *pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ef324ed097a30d5a04fba2bb85641e7857f353 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,79 @@ +_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +lr_factor = 0.1 +affine_scale = 0.9 +loss_cls_weight = 0.3 +loss_obj_weight = 0.7 +mixup_prob = 0.1 + +# =======================Unmodified in most cases================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + 
widen_factor=widen_factor, + ), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +# enable mixup +train_pipeline = [ + *pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3cd2d6b7be817f4f8e6729acc1d3f9e450457e07 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,15 @@ +_base_ = 'yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py' + +deepen_factor = 0.33 +widen_factor = 0.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b6f93428fc8d6dc1b94a8d447671ffc1a877dbb8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,15 @@ +_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +deepen_factor = 0.33 +widen_factor = 0.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f64df69fd4ea0f4c8d30b9e8928bcd1c4e1d9d35 --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,138 @@ +_base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +img_scale = (1280, 1280) # width, height +num_classes = 80 # Number of classes for classification +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + img_size=img_scale[0], + # The image scale of padding should be divided by pad_size_divisor + size_divisor=64) +# Basic size of multi-scale prior box +anchors = [ + [(19, 27), (44, 40), (38, 94)], # P3/8 + [(96, 68), (86, 152), (180, 137)], # P4/16 + [(140, 301), (303, 264), (238, 542)], # P5/32 + [(436, 615), (739, 380), (925, 792)] # P6/64 +] +# Strides of multi-scale prior box +strides = [8, 16, 32, 64] +num_det_layers = 4 # The number of model output scales +loss_cls_weight = 0.5 +loss_bbox_weight = 0.05 +loss_obj_weight = 1.0 +# The obj loss weights of the three output layers +obj_level_weights = [4.0, 1.0, 0.25, 0.06] +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio + +tta_img_scales = [(1280, 1280), (1024, 1024), (1536, 1536)] +# =======================Unmodified in most cases================== +model = dict( + backbone=dict(arch='P6', out_indices=(2, 3, 4, 5)), + neck=dict( + in_channels=[256, 512, 768, 1024], out_channels=[256, 512, 768, 1024]), + bbox_head=dict( + head_module=dict( + in_channels=[256, 512, 768, 1024], featmap_strides=strides), + prior_generator=dict(base_sizes=anchors, strides=strides), + # scaled based on number of detection layers + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_bbox=dict(loss_weight=loss_bbox_weight * (3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)), + obj_level_weights=obj_level_weights)) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +# Config for 
Test Time Augmentation. (TTA) +_multiscale_resize_transforms = [ + dict( + type='Compose', + transforms=[ + dict(type='YOLOv5KeepRatioResize', scale=s), + dict( + type='LetterResize', + scale=s, + allow_scale_up=False, + pad_val=dict(img=114)) + ]) for s in tta_img_scales +] + +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + _multiscale_resize_transforms, + [ + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) + ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'flip', + 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbd13e0859abb7a9fa315a8b0f956f959a560d7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py @@ -0,0 +1,70 @@ +_base_ = 'yolov5_s-v61_fast_1xb12-40e_cat.py' + +# This configuration is used to provide non-square training examples +# Must be a multiple of 32 +img_scale = (608, 352) # w h + +anchors = [ + [(65, 35), (159, 45), (119, 80)], # P3/8 + [(215, 77), (224, 116), (170, 166)], # P4/16 + [(376, 108), (339, 176), (483, 190)] # P5/32 +] + +# ===============================Unmodified in most cases==================== +_base_.model.bbox_head.loss_obj.loss_weight = 1.0 * ((img_scale[1] / 640)**2) +_base_.model.bbox_head.prior_generator.base_sizes = anchors + +train_pipeline = [ + *_base_.pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +_base_.train_dataloader.dataset.pipeline = train_pipeline + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='mmdet.LoadAnnotations', with_bbox=True), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None)) +test_dataloader = val_dataloader diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py new file mode 100644 index 
0000000000000000000000000000000000000000..7b7e4f227bbc6aa37873dc306009d1af842c166c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py @@ -0,0 +1,56 @@ +_base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +anchors = [ + [(68, 69), (154, 91), (143, 162)], # P3/8 + [(242, 160), (189, 287), (391, 207)], # P4/16 + [(353, 337), (539, 341), (443, 432)] # P5/32 +] + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + # The warmup_mim_iter parameter is critical. + # The default value is 1000 which is not suitable for cat datasets. 
+ param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-ms-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-ms-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..dc460fa9802d34ece214482bcda7a6bdf7435b39 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-ms-40e_cat.py @@ -0,0 +1,13 @@ +_base_ = 'yolov5_s-v61_fast_1xb12-40e_cat.py' + +model = dict( + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=1) + ])) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d8238c1377cb2f56f4c3bf0c5cd6d4227b2d70a5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py @@ -0,0 +1,23 @@ +_base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py' + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + use_mini_pad=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None)) +test_dataloader = val_dataloader + +model = dict( + test_cfg=dict( + multi_label=False, score_thr=0.25, nms=dict(iou_threshold=0.45))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7e81a0385587df40c588dcb44202a7f5d82478c1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py @@ -0,0 +1,292 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 16 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----model related----- +# Basic size of multi-scale prior box +anchors = [ + [(10, 13), (16, 30), (33, 23)], # P3/8 + [(30, 61), (62, 45), (59, 119)], # P4/16 + [(116, 90), (156, 198), (373, 
326)] # P5/32 +] + +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs +base_lr = 0.01 +max_epochs = 300 # Maximum training epochs + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # The number of boxes before NMS + nms_pre=30000, + score_thr=0.001, # Threshold to filter out boxes. + nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold + max_per_img=300) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + # The image scale of padding should be divided by pad_size_divisor + size_divisor=32, + # Additional paddings for pixel scale + extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 +# Strides of multi-scale prior box +strides = [8, 16, 32] +num_det_layers = 3 # The number of model output scales +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) # Normalization config + +# -----train val related----- +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio +loss_cls_weight = 0.5 +loss_bbox_weight = 0.05 +loss_obj_weight = 1.0 +prior_match_thr = 4. # Priori box matching threshold +# The obj loss weights of the three output layers +obj_level_weights = [4., 1., 0.4] +lr_factor = 0.01 # Learning rate scaling factor +weight_decay = 0.0005 +# Save model checkpoint and validation intervals +save_checkpoint_intervals = 10 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# Single-scale training is recommended to +# be turned on, which can speed up training. 
+env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='mmdet.DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv5CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='YOLOv5PAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=[256, 512, 1024], + num_csp_blocks=3, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOv5Head', + head_module=dict( + type='YOLOv5HeadModule', + num_classes=num_classes, + in_channels=[256, 512, 1024], + widen_factor=widen_factor, + featmap_strides=strides, + num_base_priors=3), + prior_generator=dict( + type='mmdet.YOLOAnchorGenerator', + base_sizes=anchors, + strides=strides), + # scaled based on number of detection layers + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_bbox=dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xywh', + eps=1e-7, + reduction='mean', + loss_weight=loss_bbox_weight * (3 / num_det_layers), + return_iou=True), + loss_obj=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)), + prior_match_thr=prior_match_thr, + obj_level_weights=obj_level_weights), + test_cfg=model_test_cfg) + +albu_train_transforms = [ + dict(type='Blur', p=0.01), + dict(type='MedianBlur', p=0.01), + dict(type='ToGray', p=0.01), + dict(type='CLAHE', p=0.01) +] + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', 
with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv5OptimizerConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='linear', + lr_factor=lr_factor, + max_epochs=max_epochs), + checkpoint=dict( + type='CheckpointHook', + interval=save_checkpoint_intervals, + save_best='auto', + max_keep_ckpts=max_keep_ckpts)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_checkpoint_intervals) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py new file mode 100644 index 0000000000000000000000000000000000000000..2c585ceb92e9bfb1984b49ce02f86f4d3cd4532d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py @@ -0,0 +1,42 @@ +_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +data_root = 'data/balloon/' +# Path of train annotation file +train_ann_file = 'train.json' +train_data_prefix = 'train/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'val.json' +val_data_prefix = 'val/' # Prefix of val image path +metainfo = { + 'classes': ('balloon', ), + 'palette': [ + (220, 20, 60), + ] +} +num_classes = 1 + +train_batch_size_per_gpu = 4 +train_num_workers = 2 +log_interval = 1 + +# =======================Unmodified in most cases================== +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=train_data_prefix), + ann_file=train_ann_file)) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file)) +test_dataloader = val_dataloader +val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = val_evaluator +model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes))) +default_hooks = dict(logger=dict(interval=log_interval)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py 
b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..17b4a73b092fda1b98a088a83619697702859f71 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,12 @@ +_base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py' + +# fast means faster training speed, +# but less flexibility for multitasking +model = dict( + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True)) + +train_dataloader = dict(collate_fn=dict(type='yolov5_collate')) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-p6-v62_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-p6-v62_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9fe5c0103520280ba26bb3f56a4a30658576b74b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-p6-v62_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,14 @@ +_base_ = './yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py' +deepen_factor = 1.33 +widen_factor = 1.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8782eed8df6318b3aad6333809a04f639fd0cefb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,14 @@ +_base_ = './yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py' +deepen_factor = 1.33 +widen_factor = 1.25 + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_mask-refine_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_mask-refine_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..60c11feb3d4e6f8db5f3e70af5d3afdbc5f65535 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_mask-refine_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,59 @@ +_base_ = './yolov5u_m_mask-refine_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 1.00 +widen_factor = 1.00 + +mixup_prob = 0.15 +copypaste_prob = 0.3 + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +last_transform = _base_.last_transform +affine_scale = _base_.affine_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + 
bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +mosaic_affine_transform = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] + +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..22b9e881d024bfc781b1328913b50439ac80a2f3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_l_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,18 @@ +_base_ = './yolov5u_s_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +# TODO: Update the training hyperparameters +deepen_factor = 1.0 +widen_factor = 1.0 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_mask-refine_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_mask-refine_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc86fdd2d9ae362477f4edc5e5f9dd497222946 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_mask-refine_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,79 @@ +_base_ = './yolov5u_s_mask-refine_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +affine_scale = 0.9 +mixup_prob = 0.1 +copypaste_prob = 0.1 + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +last_transform = _base_.last_transform + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +mosaic_affine_transform = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, 
-img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] + +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine), *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +_base_.custom_hooks[1].switch_pipeline = train_pipeline_stage2 diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0cfb332488ba41c5e0880bd91d8c73fccde52f36 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_m_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,18 @@ +_base_ = './yolov5u_s_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +# TODO: Update the training hyperparameters +deepen_factor = 0.67 +widen_factor = 0.75 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_mask-refine_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_mask-refine_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..1ca21b65147e830b04b0e70e61011f6a9371d637 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_mask-refine_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,20 @@ +_base_ = './yolov5u_s_mask-refine_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.25 + +# ===============================Unmodified in most cases==================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ad6a9f2eba7ac8fc56c12fab52a3a8f9b24acba1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_n_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,17 
@@ +_base_ = './yolov5u_s_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.25 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_mask-refine_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_mask-refine_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d6840bc288b2cb9d26ebc06d0b888926035ce8b9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_mask-refine_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,80 @@ +_base_ = './yolov5u_s_syncbn_fast_8xb16-300e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +use_mask2refine = True +min_area_ratio = 0.01 # YOLOv5RandomAffine + +# ===============================Unmodified in most cases==================== +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=use_mask2refine) +] + +last_transform = [ + # Delete gt_masks to avoid more computation + dict(type='RemoveDataElement', keys=['gt_masks']), + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114)), *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +_base_.custom_hooks[1].switch_pipeline = train_pipeline_stage2 diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..81d3a981c281af0f4cd9596c4a7349cb2e1bf367 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_s_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,326 @@ +_base_ = ['../../_base_/default_runtime.py', '../../_base_/det_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 16 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs +base_lr = 0.01 +max_epochs = 300 # Maximum training epochs +# Disable mosaic augmentation for final 10 epochs (stage 2) +close_mosaic_epochs = 10 + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # The number of boxes before NMS + nms_pre=30000, + score_thr=0.001, # Threshold to filter out boxes. + nms=dict(type='nms', iou_threshold=0.7), # NMS type and threshold + max_per_img=300) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + # The image scale of padding should be divided by pad_size_divisor + size_divisor=32, + # Additional paddings for pixel scale + extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 +# Strides of multi-scale prior box +strides = [8, 16, 32] +num_det_layers = 3 # The number of model output scales +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) # Normalization config + +# -----train val related----- +tal_topk = 10 # Number of bbox selected in each level +tal_alpha = 0.5 # A Hyper-parameter related to alignment_metrics +tal_beta = 6.0 # A Hyper-parameter related to alignment_metrics + +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio +# YOLOv5RandomAffine aspect ratio of width and height thres to filter bboxes +max_aspect_ratio = 100 +# TODO: Automatically scale loss_weight based on number of detection layers +loss_cls_weight = 0.5 +loss_bbox_weight = 7.5 +# Since the dfloss is implemented differently in the official +# and mmdet, we're going to divide loss_weight by 4. 
+loss_dfl_weight = 1.5 / 4 +lr_factor = 0.01 # Learning rate scaling factor +weight_decay = 0.001 +# Save model checkpoint and validation intervals +save_checkpoint_intervals = 10 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# Single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv5CSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='YOLOv5PAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=[256, 512, 1024], + num_csp_blocks=3, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOv8Head', + head_module=dict( + type='YOLOv8HeadModule', + num_classes=num_classes, + in_channels=[256, 512, 1024], + widen_factor=widen_factor, + reg_max=16, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + featmap_strides=strides), + prior_generator=dict( + type='mmdet.MlvlPointGenerator', offset=0.5, strides=strides), + bbox_coder=dict(type='DistancePointBBoxCoder'), + # scaled based on number of detection layers + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=loss_cls_weight), + loss_bbox=dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xyxy', + reduction='sum', + loss_weight=loss_bbox_weight, + return_iou=False), + loss_dfl=dict( + type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=loss_dfl_weight)), + train_cfg=dict( + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=num_classes, + use_ciou=True, + topk=tal_topk, + alpha=tal_alpha, + beta=tal_beta, + eps=1e-9)), + test_cfg=model_test_cfg) + +albu_train_transforms = [ + dict(type='Blur', p=0.01), + dict(type='MedianBlur', p=0.01), + dict(type='ToGray', p=0.01), + dict(type='CLAHE', p=0.01) +] + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +last_transform = [ + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=max_aspect_ratio, + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + 
max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=max_aspect_ratio, + border_val=(114, 114, 114)), *last_transform +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='yolov5_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv5OptimizerConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='linear', + lr_factor=lr_factor, + max_epochs=max_epochs, + warmup_epochs=3.0, + warmup_momentum=0.8, + warmup_bias_lr=0.1), + checkpoint=dict( + type='CheckpointHook', + interval=save_checkpoint_intervals, + save_best='auto', + max_keep_ckpts=max_keep_ckpts)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_checkpoint_intervals) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_mask-refine_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_mask-refine_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..33092aa6a47e6053c8ce83dcdf820828619077bc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_mask-refine_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,17 @@ +_base_ = './yolov5u_l_mask-refine_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 1.33 +widen_factor = 1.25 + +# 
=======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_syncbn_fast_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_syncbn_fast_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fd471fd46f3e19c4e0a4176703d4ab5eeee3aa0b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov5/yolov5u/yolov5u_x_syncbn_fast_8xb16-300e_coco.py @@ -0,0 +1,18 @@ +_base_ = './yolov5u_l_syncbn_fast_8xb16-300e_coco.py' + +# ========================modified parameters====================== +# TODO: Update the training hyperparameters +deepen_factor = 1.33 +widen_factor = 1.25 + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/README.md b/models/YOLO-World/third_party/mmyolo/configs/yolov6/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7ecda276988ff87702e902be8799d85b2dfdc79f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/README.md @@ -0,0 +1,53 @@ +# YOLOv6 + +> [YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications](https://arxiv.org/abs/2209.02976) + + + +## Abstract + +For years, YOLO series have been de facto industry-level standard for efficient object detection. The YOLO community has prospered overwhelmingly to enrich its use in a multitude of hardware platforms and abundant scenarios. In this technical report, we strive to push its limits to the next level, stepping forward with an unwavering mindset for industry application. Considering the diverse requirements for speed and accuracy in the real environment, we extensively examine the up-to-date object detection advancements either from industry or academy. Specifically, we heavily assimilate ideas from recent network design, training strategies, testing techniques, quantization and optimization methods. On top of this, we integrate our thoughts and practice to build a suite of deployment-ready networks at various scales to accommodate diversified use cases. With the generous permission of YOLO authors, we name it YOLOv6. We also express our warm welcome to users and contributors for further enhancement. For a glimpse of performance, our YOLOv6-N hits 35.9% AP on COCO dataset at a throughput of 1234 FPS on an NVIDIA Tesla T4 GPU. YOLOv6-S strikes 43.5% AP at 495 FPS, outperforming other mainstream detectors at the same scale (YOLOv5-S, YOLOX-S and PPYOLOE-S). Our quantized version of YOLOv6-S even brings a new state-of-the-art 43.3% AP at 869 FPS. Furthermore, YOLOv6-M/L also achieves better accuracy performance (i.e., 49.5%/52.3%) than other detectors with the similar inference speed. We carefully conducted experiments to validate the effectiveness of each component. + +
+
+*Figure: YOLOv6-s model structure*
+
+*Figure: YOLOv6-l model structure*
+
+ +## Results and models + +### COCO + +| Backbone | Arch | Size | Epoch | SyncBN | AMP | Mem (GB) | Box AP | Config | Download | +| :------: | :--: | :--: | :---: | :----: | :-: | :------: | :----: | :-------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOv6-n | P5 | 640 | 400 | Yes | Yes | 6.04 | 36.2 | [config](./yolov6_n_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco/yolov6_n_syncbn_fast_8xb32-400e_coco_20221030_202726-d99b2e82.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco/yolov6_n_syncbn_fast_8xb32-400e_coco_20221030_202726.log.json) | +| YOLOv6-t | P5 | 640 | 400 | Yes | Yes | 8.13 | 41.0 | [config](./yolov6_t_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco/yolov6_t_syncbn_fast_8xb32-400e_coco_20221030_143755-cf0d278f.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco/yolov6_t_syncbn_fast_8xb32-400e_coco_20221030_143755.log.json) | +| YOLOv6-s | P5 | 640 | 400 | Yes | Yes | 8.88 | 44.0 | [config](./yolov6_s_syncbn_fast_8xb32-400e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035.log.json) | +| YOLOv6-m | P5 | 640 | 300 | Yes | Yes | 16.69 | 48.4 | [config](./yolov6_m_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658-85bda3f4.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658.log.json) | +| YOLOv6-l | P5 | 640 | 300 | Yes | Yes | 20.86 | 51.0 | [config](./yolov6_l_syncbn_fast_8xb32-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156-91e3c447.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156.log.json) | + +**Note**: + +1. The official m and l models use knowledge distillation, but our version does not support it, which will be implemented in [MMRazor](https://github.com/open-mmlab/mmrazor) in the future. +2. The performance is unstable and may fluctuate by about 0.3 mAP. +3. If users need the weight of 300 epoch for nano, tiny and small model, they can train according to the configs of 300 epoch provided by us, or convert the official weight according to the [converter script](../../tools/model_converters/). +4. We have observed that the [base model](https://github.com/meituan/YOLOv6/tree/main/configs/base) has been officially released in v6 recently. Although the accuracy has decreased, it is more efficient. We will also provide the base model configuration in the future. 
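
All of the configs collected in this diff follow the same mmengine `_base_` inheritance pattern: a child file names a base config and overrides a handful of fields (depth/width scaling factors, anchors, loss weights, dataloader settings). As a minimal sketch of how those overrides resolve, assuming an mmyolo checkout with `mmengine` installed and with the path below adjusted to your tree, one of the YOLOv6 configs can be loaded and inspected like this:

```python
# Minimal sketch (not part of the original configs): resolve a config that
# uses `_base_` inheritance and inspect fields the child file overrides.
# The config path is an assumption; adapt it to your checkout.
from mmengine.config import Config

cfg_path = ('models/YOLO-World/third_party/mmyolo/configs/'
            'yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py')
cfg = Config.fromfile(cfg_path)  # merges the whole _base_ chain into one config

# Values defined in the base config and overridden by the child configs
print(cfg.model.backbone.deepen_factor, cfg.model.backbone.widen_factor)
print(cfg.train_dataloader.batch_size, cfg.train_cfg.max_epochs)

# Optionally dump the fully resolved config for side-by-side comparison
cfg.dump('yolov6_l_resolved.py')
```

The same pattern applies to the YOLOv5 and YOLOv5u variants above, which differ from their base configs mainly in `deepen_factor`/`widen_factor` and a few training hyperparameters.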
+ +## Citation + +```latex +@article{li2022yolov6, + title={YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications}, + author={Li, Chuyi and Li, Lulu and Jiang, Hongliang and Weng, Kaiheng and Geng, Yifei and Li, Liang and Ke, Zaidan and Li, Qingyuan and Cheng, Meng and Nie, Weiqiang and others}, + journal={arXiv preprint arXiv:2209.02976}, + year={2022} +} +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/yolov6/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..df451526957c08d5956db33fe5e180cd7d5fcd66 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/metafile.yml @@ -0,0 +1,83 @@ +Collections: + - Name: YOLOv6 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - AMP + - Synchronize BN + Training Resources: 8x A100 GPUs + Architecture: + - CSPDarkNet + - PAFPN + - RepVGG + Paper: + URL: https://arxiv.org/abs/2209.02976 + Title: 'YOLOv6: A Single-Stage Object Detection Framework for Industrial Applications' + README: configs/yolov6/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.0.1/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.0.1 + +Models: + - Name: yolov6_s_syncbn_fast_8xb32-400e_coco + In Collection: YOLOv6 + Config: configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py + Metadata: + Training Memory (GB): 8.88 + Epochs: 400 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth + - Name: yolov6_n_syncbn_fast_8xb32-400e_coco + In Collection: YOLOv6 + Config: configs/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco.py + Metadata: + Training Memory (GB): 6.04 + Epochs: 400 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.2 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco/yolov6_n_syncbn_fast_8xb32-400e_coco_20221030_202726-d99b2e82.pth + - Name: yolov6_t_syncbn_fast_8xb32-400e_coco + In Collection: YOLOv6 + Config: configs/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco.py + Metadata: + Training Memory (GB): 8.13 + Epochs: 400 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco/yolov6_t_syncbn_fast_8xb32-400e_coco_20221030_143755-cf0d278f.pth + - Name: yolov6_m_syncbn_fast_8xb32-300e_coco + In Collection: YOLOv6 + Config: configs/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 16.69 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.4 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658-85bda3f4.pth + - Name: yolov6_l_syncbn_fast_8xb32-300e_coco + In Collection: YOLOv6 + Config: configs/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py + Metadata: + Training Memory (GB): 20.86 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156-91e3c447.pth diff --git 
a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5ecf347e4aa0b3194b8be33d9c294915dd9e56 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,28 @@ +_base_ = './yolov6_m_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 1 +# The scaling factor that controls the width of the network structure +widen_factor = 1 + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. / 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. / 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + block_act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..09811c8c06fb81a061ac4da7904c8d7d1e248411 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,62 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.6 +# The scaling factor that controls the width of the network structure +widen_factor = 0.75 + +# -----train val related----- +affine_scale = 0.9 # YOLOv5RandomAffine scaling ratio + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict( + type='YOLOv6CSPBep', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=2. / 3, + block_cfg=dict(type='RepVGGBlock'), + act_cfg=dict(type='ReLU', inplace=True)), + neck=dict( + type='YOLOv6CSPRepPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + block_cfg=dict(type='RepVGGBlock'), + hidden_ratio=2. 
/ 3, + block_act_cfg=dict(type='ReLU', inplace=True)), + bbox_head=dict( + type='YOLOv6Head', head_module=dict(widen_factor=widen_factor))) + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=0.1, + pre_transform=[*_base_.pre_transform, *mosaic_affine_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bc2db4b6c03277a7c62ba3ed505d54f54267328f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.25 + +# -----train val related----- +lr_factor = 0.02 # Learning rate scaling factor + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) + +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f66aa15fc447bce5f510a60bdda1914a8a7b5a76 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-400e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.25 + +# -----train val related----- +lr_factor = 0.02 # Learning rate scaling factor + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) + +default_hooks = 
dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..82578fccf7fffb8e4bb4ac21170543a7f71bc63e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_fast_1xb12-40e_cat.py @@ -0,0 +1,56 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 +num_last_epochs = 5 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict( + initial_assigner=dict(num_classes=num_classes), + assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu +_base_.custom_hooks[1].switch_epoch = max_epochs - num_last_epochs + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + # The warmup_mim_iter parameter is critical. + # The default value is 1000 which is not suitable for cat datasets. 
+ param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict( + max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..dbffaeb3362883d8a70f43c0722dd6c99b8b8352 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,33 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +# ======================= Frequently modified parameters ===================== +# -----train val related----- +# Base learning rate for optim_wrapper +max_epochs = 300 # Maximum training epochs +num_last_epochs = 15 # Last epoch number to switch training pipeline + +# ============================== Unmodified in most cases =================== +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='cosine', + lr_factor=0.01, + max_epochs=max_epochs)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - num_last_epochs, + switch_pipeline=_base_.train_pipeline_stage2) +] + +train_cfg = dict( + max_epochs=max_epochs, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..eb564c07a906185f6702aac88cbb4d53493f168c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py @@ -0,0 +1,280 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ======================= Frequently modified parameters ===================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 32 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper +base_lr = 0.01 +max_epochs = 400 # Maximum training epochs +num_last_epochs = 15 # Last epoch number to switch training pipeline + +# ======================= Possible modified parameters ======================= +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single 
GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + size_divisor=32, + extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 + +# -----train val related----- +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio +lr_factor = 0.01 # Learning rate scaling factor +weight_decay = 0.0005 +# Save model checkpoint and validation intervals +save_epoch_intervals = 10 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# Single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ============================== Unmodified in most cases =================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv6EfficientRep', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='ReLU', inplace=True)), + neck=dict( + type='YOLOv6RepPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=[128, 256, 512], + num_csp_blocks=12, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='ReLU', inplace=True), + ), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict( + type='YOLOv6HeadModule', + num_classes=num_classes, + in_channels=[128, 256, 512], + widen_factor=widen_factor, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='SiLU', inplace=True), + featmap_strides=[8, 16, 32]), + loss_bbox=dict( + type='IoULoss', + iou_mode='giou', + bbox_format='xyxy', + reduction='mean', + loss_weight=2.5, + return_iou=False)), + train_cfg=dict( + initial_epoch=4, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=num_classes, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=num_classes, + topk=13, + alpha=1, + beta=6), + ), + test_cfg=dict( + multi_label=True, + nms_pre=30000, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=300)) + +# The training pipeline of YOLOv6 is basically the same as YOLOv5. +# The difference is that Mosaic and RandomAffine will be closed in the last 15 epochs. 
# noqa +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.1, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + max_shear_degree=0.0), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.1, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_shear_degree=0.0, + ), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + collate_fn=dict(type='yolov5_collate'), + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +# Optimizer and learning rate scheduler of YOLOv6 are basically the same as YOLOv5. # noqa +# The difference is that the scheduler_type of YOLOv6 is cosine. 
+optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv5OptimizerConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='cosine', + lr_factor=lr_factor, + max_epochs=max_epochs), + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto')) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - num_last_epochs, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9da63f6984a9a23bc7ca78780db5be5a782399 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,17 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.375 + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..75755555a58b45309df9213b6262cee030e41a9d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-400e_coco.py @@ -0,0 +1,17 @@ +_base_ = './yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.375 + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) diff --git 
a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_l_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_l_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7ed4b05538c077d6f49036c6399942d5f8b3f627 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_l_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,28 @@ +_base_ = './yolov6_v3_m_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 1 +# The scaling factor that controls the width of the network structure +widen_factor = 1 + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. / 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. / 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + block_act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_m_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_m_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..982b0c8865a557c9970c1f50e3b84acba89bf93f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_m_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,63 @@ +_base_ = './yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.6 +# The scaling factor that controls the width of the network structure +widen_factor = 0.75 + +# -----train val related----- +affine_scale = 0.9 # YOLOv5RandomAffine scaling ratio + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict( + type='YOLOv6CSPBep', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=2. / 3, + block_cfg=dict(type='RepVGGBlock'), + act_cfg=dict(type='ReLU', inplace=True)), + neck=dict( + type='YOLOv6CSPRepBiPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + block_cfg=dict(type='RepVGGBlock'), + hidden_ratio=2. 
/ 3, + block_act_cfg=dict(type='ReLU', inplace=True)), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict(reg_max=16, widen_factor=widen_factor))) + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=0.1, + pre_transform=[*_base_.pre_transform, *mosaic_affine_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_n_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_n_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..96469f026e253b76a293f8f3ef81148af5d258a8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_n_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.25 + +# -----train val related----- +lr_factor = 0.02 # Learning rate scaling factor + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) + +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8b0ad190139fa199918752cb8b531352db942fc0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,282 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ======================= Frequently modified parameters ===================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 32 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers 
is 0 +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper +base_lr = 0.01 +max_epochs = 300 # Maximum training epochs +num_last_epochs = 15 # Last epoch number to switch training pipeline + +# ======================= Possible modified parameters ======================= +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + size_divisor=32, + extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 + +# -----train val related----- +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio +lr_factor = 0.01 # Learning rate scaling factor +weight_decay = 0.0005 +# Save model checkpoint and validation intervals +save_epoch_intervals = 10 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 +# Single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ============================== Unmodified in most cases =================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv6EfficientRep', + out_indices=[1, 2, 3, 4], + use_cspsppf=True, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='ReLU', inplace=True)), + neck=dict( + type='YOLOv6RepBiPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[128, 256, 512, 1024], + out_channels=[128, 256, 512], + num_csp_blocks=12, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='ReLU', inplace=True), + ), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict( + type='YOLOv6HeadModule', + num_classes=num_classes, + in_channels=[128, 256, 512], + widen_factor=widen_factor, + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), + act_cfg=dict(type='SiLU', inplace=True), + featmap_strides=[8, 16, 32]), + loss_bbox=dict( + type='IoULoss', + iou_mode='giou', + bbox_format='xyxy', + reduction='mean', + loss_weight=2.5, + return_iou=False)), + train_cfg=dict( + initial_epoch=4, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=num_classes, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=num_classes, + topk=13, + alpha=1, + beta=6), + ), + test_cfg=dict( + multi_label=True, + nms_pre=30000, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=300)) + +# The training pipeline of YOLOv6 is basically the same as YOLOv5. +# The difference is that Mosaic and RandomAffine will be closed in the last 15 epochs. 
# noqa +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.1, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + max_shear_degree=0.0), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_translate_ratio=0.1, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_shear_degree=0.0, + ), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + collate_fn=dict(type='yolov5_collate'), + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +# Optimizer and learning rate scheduler of YOLOv6 are basically the same as YOLOv5. # noqa +# The difference is that the scheduler_type of YOLOv6 is cosine. 
+optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv5OptimizerConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='cosine', + lr_factor=lr_factor, + max_epochs=max_epochs), + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto')) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - num_last_epochs, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_t_syncbn_fast_8xb32-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_t_syncbn_fast_8xb32-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d088b6b6629345f6f086f67373206b6d6f9b7e31 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov6/yolov6_v3_t_syncbn_fast_8xb32-300e_coco.py @@ -0,0 +1,17 @@ +_base_ = './yolov6_v3_s_syncbn_fast_8xb32-300e_coco.py' + +# ======================= Possible modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.375 + +# ============================== Unmodified in most cases =================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict( + type='YOLOv6Head', + head_module=dict(widen_factor=widen_factor), + loss_bbox=dict(iou_mode='siou'))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/README.md b/models/YOLO-World/third_party/mmyolo/configs/yolov7/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f8f87f8358e25b7c8004aabfe7229d7941b6919a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/README.md @@ -0,0 +1,50 @@ +# YOLOv7 + +> [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696) + + + +## Abstract + +YOLOv7 surpasses all known object detectors in both speed and accuracy in the range from 5 FPS to 160 FPS and has the highest accuracy 56.8% AP among all known real-time object detectors with 30 FPS or higher on GPU V100. 
YOLOv7-E6 object detector (56 FPS V100, 55.9% AP) outperforms both transformer-based detector SWIN-L Cascade-Mask R-CNN (9.2 FPS A100, 53.9% AP) by 509% in speed and 2% in accuracy, and convolutional-based detector ConvNeXt-XL Cascade-Mask R-CNN (8.6 FPS A100, 55.2% AP) by 551% in speed and 0.7% AP in accuracy, as well as YOLOv7 outperforms: YOLOR, YOLOX, Scaled-YOLOv4, YOLOv5, DETR, Deformable DETR, DINO-5scale-R50, ViT-Adapter-B and many other object detectors in speed and accuracy. Moreover, we train YOLOv7 only on MS COCO dataset from scratch without using any other datasets or pre-trained weights. Source code is released in [this https URL](https://github.com/WongKinYiu/yolov7). + +
+
+*Figure: YOLOv7-l-P5 model structure.*
+
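+
+As a quick-start illustration (not part of the released configs), the sketch below shows how a config in this directory is typically specialised for a custom dataset by inheritance, mirroring the `yolov7_tiny_fast_1xb12-40e_cat.py` config added later in this diff. The dataset root, class name, and annotation file names are assumptions for illustration only.
+
+```python
+# Minimal fine-tuning sketch (illustrative): inherit a released YOLOv7 config and
+# override only dataset- and schedule-specific fields. Paths and classes are assumed.
+_base_ = 'yolov7_tiny_syncbn_fast_8x16b-300e_coco.py'
+
+data_root = './data/my_dataset/'   # assumed dataset location
+class_name = ('my_class', )        # assumed single-class dataset
+num_classes = len(class_name)
+metainfo = dict(classes=class_name, palette=[(20, 220, 60)])
+
+max_epochs = 40
+train_batch_size_per_gpu = 12
+
+# Start from the COCO-pretrained checkpoint listed in the table below.
+load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth'  # noqa
+
+model = dict(
+    backbone=dict(frozen_stages=4),  # freeze the backbone for a short schedule
+    bbox_head=dict(head_module=dict(num_classes=num_classes)))
+# Note: the cat example in this diff also re-estimates the anchor sizes
+# (prior_generator base_sizes) and the optimizer's batch_size_per_gpu for its data.
+
+train_dataloader = dict(
+    batch_size=train_batch_size_per_gpu,
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        ann_file='annotations/trainval.json',
+        data_prefix=dict(img='images/')))
+
+val_dataloader = dict(
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        ann_file='annotations/test.json',
+        data_prefix=dict(img='images/')))
+test_dataloader = val_dataloader
+
+val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
+test_evaluator = val_evaluator
+
+train_cfg = dict(max_epochs=max_epochs, val_interval=10)
+```
+
+Training would then point MMYOLO's usual `tools/train.py` entry point at this file.
+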
+ +## Results and models + +### COCO + +| Backbone | Arch | Size | SyncBN | AMP | Mem (GB) | Box AP | Config | Download | +| :---------: | :--: | :--: | :----: | :-: | :------: | :----: | :----------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOv7-tiny | P5 | 640 | Yes | Yes | 2.7 | 37.5 | [config](./yolov7_tiny_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719.log.json) | +| YOLOv7-l | P5 | 640 | Yes | Yes | 10.3 | 50.9 | [config](./yolov7_l_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601-8113c0eb.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601.log.json) | +| YOLOv7-x | P5 | 640 | Yes | Yes | 13.7 | 52.8 | [config](./yolov7_x_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331-ef949a68.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331.log.json) | +| YOLOv7-w | P6 | 1280 | Yes | Yes | 27.0 | 54.1 | [config](./yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco/yolov7_w-p6_syncbn_fast_8x16b-300e_coco_20221123_053031-a68ef9d2.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco/yolov7_w-p6_syncbn_fast_8x16b-300e_coco_20221123_053031.log.json) | +| YOLOv7-e | P6 | 1280 | Yes | Yes | 42.5 | 55.1 | [config](./yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco/yolov7_e-p6_syncbn_fast_8x16b-300e_coco_20221126_102636-34425033.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco/yolov7_e-p6_syncbn_fast_8x16b-300e_coco_20221126_102636.log.json) | + +**Note**: +In the official YOLOv7 code, the `random_perspective` data augmentation in COCO object detection task training uses mask annotation information, which leads to higher performance. Object detection should not use mask annotation, so only box annotation information is used in `MMYOLO`. We will use the mask annotation information in the instance segmentation task. + +1. The performance is unstable and may fluctuate by about 0.3 mAP. The performance shown above is the best model. +2. If users need the weight of `YOLOv7-e2e`, they can train according to the configs provided by us, or convert the official weight according to the [converter script](https://github.com/open-mmlab/mmyolo/blob/main/tools/model_converters/yolov7_to_mmyolo.py). +3. 
`fast` means that `YOLOv5DetDataPreprocessor` and `yolov5_collate` are used for data preprocessing, which is faster for training, but less flexible for multitasking. Recommended to use fast version config if you only care about object detection. +4. `SyncBN` means use SyncBN, `AMP` indicates training with mixed precision. +5. We use 8x A100 for training, and the single-GPU batch size is 16. This is different from the official code. + +## Citation + +```latex +@article{wang2022yolov7, + title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors}, + author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark}, + journal={arXiv preprint arXiv:2207.02696}, + year={2022} +} +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/yolov7/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..067ec6b45afefa2ae444b0343ad327b94f1507d2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/metafile.yml @@ -0,0 +1,83 @@ +Collections: + - Name: YOLOv7 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - AMP + - Synchronize BN + Training Resources: 8x A100 GPUs + Architecture: + - EELAN + - PAFPN + - RepVGG + Paper: + URL: https://arxiv.org/abs/2207.02696 + Title: 'YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors' + README: configs/yolov7/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.0.1/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.0.1 + +Models: + - Name: yolov7_tiny_syncbn_fast_8x16b-300e_coco + In Collection: YOLOv7 + Config: configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py + Metadata: + Training Memory (GB): 2.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth + - Name: yolov7_l_syncbn_fast_8x16b-300e_coco + In Collection: YOLOv7 + Config: configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py + Metadata: + Training Memory (GB): 10.3 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601-8113c0eb.pth + - Name: yolov7_x_syncbn_fast_8x16b-300e_coco + In Collection: YOLOv7 + Config: configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py + Metadata: + Training Memory (GB): 13.7 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.8 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331-ef949a68.pth + - Name: yolov7_w-p6_syncbn_fast_8x16b-300e_coco + In Collection: YOLOv7 + Config: configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py + Metadata: + Training Memory (GB): 27.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 54.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco/yolov7_w-p6_syncbn_fast_8x16b-300e_coco_20221123_053031-a68ef9d2.pth + - Name: yolov7_e-p6_syncbn_fast_8x16b-300e_coco + In Collection: YOLOv7 + Config: configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py + 
Metadata: + Training Memory (GB): 42.5 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 55.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco/yolov7_e-p6_syncbn_fast_8x16b-300e_coco_20221126_102636-34425033.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_d-p6_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_d-p6_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a68715264d59c16ef2b31010ede44310d97a3a7e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_d-p6_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py' + +model = dict( + backbone=dict(arch='D'), + neck=dict( + use_maxpool_in_downsample=True, + use_in_channels_in_downsample=True, + block_cfg=dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.2, + num_blocks=6, + num_convs_in_block=1), + in_channels=[384, 768, 1152, 1536], + out_channels=[192, 384, 576, 768]), + bbox_head=dict( + head_module=dict( + in_channels=[192, 384, 576, 768], + main_out_channels=[384, 768, 1152, 1536], + aux_out_channels=[384, 768, 1152, 1536], + ))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1463dc487e05eabfd3f586a28262017a9dc566 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e-p6_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,19 @@ +_base_ = './yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py' + +model = dict( + backbone=dict(arch='E'), + neck=dict( + use_maxpool_in_downsample=True, + use_in_channels_in_downsample=True, + block_cfg=dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.2, + num_blocks=6, + num_convs_in_block=1), + in_channels=[320, 640, 960, 1280], + out_channels=[160, 320, 480, 640]), + bbox_head=dict( + head_module=dict( + in_channels=[160, 320, 480, 640], + main_out_channels=[320, 640, 960, 1280]))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e2e-p6_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e2e-p6_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..6af81051b72977410d5b51cf7a02a476d55ceb24 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_e2e-p6_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,20 @@ +_base_ = './yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py' + +model = dict( + backbone=dict(arch='E2E'), + neck=dict( + use_maxpool_in_downsample=True, + use_in_channels_in_downsample=True, + block_cfg=dict( + type='EELANBlock', + num_elan_block=2, + middle_ratio=0.4, + block_ratio=0.2, + num_blocks=6, + num_convs_in_block=1), + in_channels=[320, 640, 960, 1280], + out_channels=[160, 320, 480, 640]), + bbox_head=dict( + head_module=dict( + in_channels=[160, 320, 480, 640], + main_out_channels=[320, 640, 960, 1280]))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a756c27e5366e3a83658132b0e330a5f68ad22 --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,324 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 16 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----model related----- +# Basic size of multi-scale prior box +anchors = [ + [(12, 16), (19, 36), (40, 28)], # P3/8 + [(36, 75), (76, 55), (72, 146)], # P4/16 + [(142, 110), (192, 243), (459, 401)] # P5/32 +] +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs +base_lr = 0.01 +max_epochs = 300 # Maximum training epochs + +num_epoch_stage2 = 30 # The last 30 epochs switch evaluation interval +val_interval_stage2 = 1 # Evaluation interval + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # The number of boxes before NMS. + nms_pre=30000, + score_thr=0.001, # Threshold to filter out boxes. + nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold + max_per_img=300) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# It means not used if batch_shapes_cfg is None. +batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=val_batch_size_per_gpu, + img_size=img_scale[0], + # The image scale of padding should be divided by pad_size_divisor + size_divisor=32, + # Additional paddings for pixel scale + extra_pad_ratio=0.5) + +# -----model related----- +strides = [8, 16, 32] # Strides of multi-scale prior box +num_det_layers = 3 # The number of model output scales +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) + +# Data augmentation +max_translate_ratio = 0.2 # YOLOv5RandomAffine +scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine +mixup_prob = 0.15 # YOLOv5MixUp +randchoice_mosaic_prob = [0.8, 0.2] +mixup_alpha = 8.0 # YOLOv5MixUp +mixup_beta = 8.0 # YOLOv5MixUp + +# -----train val related----- +loss_cls_weight = 0.3 +loss_bbox_weight = 0.05 +loss_obj_weight = 0.7 +# BatchYOLOv7Assigner params +simota_candidate_topk = 10 +simota_iou_weight = 3.0 +simota_cls_weight = 1.0 +prior_match_thr = 4. # Priori box matching threshold +obj_level_weights = [4., 1., + 0.4] # The obj loss weights of the three output layers + +lr_factor = 0.1 # Learning rate scaling factor +weight_decay = 0.0005 +save_epoch_intervals = 1 # Save model checkpoint and validation intervals +max_keep_ckpts = 3 # The maximum checkpoints to keep. 
+ +# Single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv7Backbone', + arch='L', + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='YOLOv7PAFPN', + block_cfg=dict( + type='ELANBlock', + middle_ratio=0.5, + block_ratio=0.25, + num_blocks=4, + num_convs_in_block=1), + upsample_feats_cat_first=False, + in_channels=[512, 1024, 1024], + # The real output channel will be multiplied by 2 + out_channels=[128, 256, 512], + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOv7Head', + head_module=dict( + type='YOLOv7HeadModule', + num_classes=num_classes, + in_channels=[256, 512, 1024], + featmap_strides=strides, + num_base_priors=3), + prior_generator=dict( + type='mmdet.YOLOAnchorGenerator', + base_sizes=anchors, + strides=strides), + # scaled based on number of detection layers + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_bbox=dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xywh', + reduction='mean', + loss_weight=loss_bbox_weight * (3 / num_det_layers), + return_iou=True), + loss_obj=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)), + prior_match_thr=prior_match_thr, + obj_level_weights=obj_level_weights, + # BatchYOLOv7Assigner params + simota_candidate_topk=simota_candidate_topk, + simota_iou_weight=simota_iou_weight, + simota_cls_weight=simota_cls_weight), + test_cfg=model_test_cfg) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +mosiac4_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # note + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +mosiac9_pipeline = [ + dict( + type='Mosaic9', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # note + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +randchoice_mosaic_pipeline = dict( + type='RandomChoice', + transforms=[mosiac4_pipeline, mosiac9_pipeline], + prob=randchoice_mosaic_prob) + +train_pipeline = [ + *pre_transform, + randchoice_mosaic_pipeline, + dict( + type='YOLOv5MixUp', + alpha=mixup_alpha, # note + beta=mixup_beta, # note + prob=mixup_prob, + pre_transform=[*pre_transform, randchoice_mosaic_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) 
+] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='yolov5_collate'), # FASTER + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv7OptimWrapperConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='cosine', + lr_factor=lr_factor, # note + max_epochs=max_epochs), + checkpoint=dict( + type='CheckpointHook', + save_param_scheduler=False, + interval=save_epoch_intervals, + save_best='auto', + max_keep_ckpts=max_keep_ckpts)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), # Can be accelerated + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - num_epoch_stage2, val_interval_stage2)]) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_fast_1xb12-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..eb0446760eeb39951ad2bf6a8cbb1fe3cc19870a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_fast_1xb12-40e_cat.py @@ -0,0 +1,56 @@ +_base_ = 'yolov7_tiny_syncbn_fast_8x16b-300e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +anchors = [ + [(68, 69), (154, 91), (143, 162)], # P3/8 + [(242, 160), (189, 287), (391, 207)], # P4/16 + [(353, 337), (539, 341), (443, 432)] # P5/32 +] + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 + +load_from = 
'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + # The warmup_mim_iter parameter is critical. + # The default value is 1000 which is not suitable for cat datasets. + param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e9f10e2926a840d2af7a9e27b0e2047710343d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,98 @@ +_base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py' + +# ========================modified parameters======================== + +# -----model related----- +# Data augmentation +max_translate_ratio = 0.1 # YOLOv5RandomAffine +scaling_ratio_range = (0.5, 1.6) # YOLOv5RandomAffine +mixup_prob = 0.05 # YOLOv5MixUp +randchoice_mosaic_prob = [0.8, 0.2] +mixup_alpha = 8.0 # YOLOv5MixUp +mixup_beta = 8.0 # YOLOv5MixUp + +# -----train val related----- +loss_cls_weight = 0.5 +loss_obj_weight = 1.0 + +lr_factor = 0.01 # Learning rate scaling factor +# ===============================Unmodified in most cases==================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +model = dict( + backbone=dict( + arch='Tiny', act_cfg=dict(type='LeakyReLU', negative_slope=0.1)), + neck=dict( + is_tiny_version=True, + in_channels=[128, 256, 512], + out_channels=[64, 128, 256], + block_cfg=dict( + _delete_=True, type='TinyDownSampleBlock', middle_ratio=0.25), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + use_repconv_outs=False), + bbox_head=dict( + head_module=dict(in_channels=[128, 256, 512]), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + +mosiac4_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # change + 
scaling_ratio_range=scaling_ratio_range, # change + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +mosiac9_pipeline = [ + dict( + type='Mosaic9', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # change + scaling_ratio_range=scaling_ratio_range, # change + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +randchoice_mosaic_pipeline = dict( + type='RandomChoice', + transforms=[mosiac4_pipeline, mosiac9_pipeline], + prob=randchoice_mosaic_prob) + +train_pipeline = [ + *pre_transform, + randchoice_mosaic_pipeline, + dict( + type='YOLOv5MixUp', + alpha=mixup_alpha, + beta=mixup_beta, + prob=mixup_prob, # change + pre_transform=[*pre_transform, randchoice_mosaic_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9758b871785050ef41303082aab745a6568e373b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_w-p6_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,182 @@ +_base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py' + +# ========================modified parameters======================== +# -----data related----- +img_scale = (1280, 1280) # height, width +num_classes = 80 # Number of classes for classification +# Config of batch shapes. Only on val +# It means not used if batch_shapes_cfg is None. 
+batch_shapes_cfg = dict( + img_size=img_scale[ + 0], # The image scale of padding should be divided by pad_size_divisor + size_divisor=64) # Additional paddings for pixel scale +tta_img_scales = [(1280, 1280), (1024, 1024), (1536, 1536)] + +# -----model related----- +# Basic size of multi-scale prior box +anchors = [ + [(19, 27), (44, 40), (38, 94)], # P3/8 + [(96, 68), (86, 152), (180, 137)], # P4/16 + [(140, 301), (303, 264), (238, 542)], # P5/32 + [(436, 615), (739, 380), (925, 792)] # P6/64 +] +strides = [8, 16, 32, 64] # Strides of multi-scale prior box +num_det_layers = 4 # # The number of model output scales +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) + +# Data augmentation +max_translate_ratio = 0.2 # YOLOv5RandomAffine +scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine +mixup_prob = 0.15 # YOLOv5MixUp +randchoice_mosaic_prob = [0.8, 0.2] +mixup_alpha = 8.0 # YOLOv5MixUp +mixup_beta = 8.0 # YOLOv5MixUp + +# -----train val related----- +loss_cls_weight = 0.3 +loss_bbox_weight = 0.05 +loss_obj_weight = 0.7 +obj_level_weights = [4.0, 1.0, 0.25, 0.06] +simota_candidate_topk = 20 + +# The only difference between P6 and P5 in terms of +# hyperparameters is lr_factor +lr_factor = 0.2 + +# ===============================Unmodified in most cases==================== +pre_transform = _base_.pre_transform + +model = dict( + backbone=dict(arch='W', out_indices=(2, 3, 4, 5)), + neck=dict( + in_channels=[256, 512, 768, 1024], + out_channels=[128, 256, 384, 512], + use_maxpool_in_downsample=False, + use_repconv_outs=False), + bbox_head=dict( + head_module=dict( + type='YOLOv7p6HeadModule', + in_channels=[128, 256, 384, 512], + featmap_strides=strides, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + prior_generator=dict(base_sizes=anchors, strides=strides), + simota_candidate_topk=simota_candidate_topk, # note + # scaled based on number of detection layers + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_bbox=dict(loss_weight=loss_bbox_weight * (3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)), + obj_level_weights=obj_level_weights)) + +mosiac4_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # note + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +mosiac9_pipeline = [ + dict( + type='Mosaic9', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # note + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +randchoice_mosaic_pipeline = dict( + type='RandomChoice', + transforms=[mosiac4_pipeline, mosiac9_pipeline], + prob=randchoice_mosaic_prob) + +train_pipeline = [ + *pre_transform, + randchoice_mosaic_pipeline, + dict( + type='YOLOv5MixUp', + alpha=mixup_alpha, # note + beta=mixup_beta, # note + prob=mixup_prob, + pre_transform=[*pre_transform, randchoice_mosaic_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + 
type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] +val_dataloader = dict( + dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg)) +test_dataloader = val_dataloader + +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) + +# Config for Test Time Augmentation. (TTA) +_multiscale_resize_transforms = [ + dict( + type='Compose', + transforms=[ + dict(type='YOLOv5KeepRatioResize', scale=s), + dict( + type='LetterResize', + scale=s, + allow_scale_up=False, + pad_val=dict(img=114)) + ]) for s in tta_img_scales +] + +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + _multiscale_resize_transforms, + [ + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) + ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'flip', + 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..9929705962c918392af12dd0a8275321f89fd361 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py @@ -0,0 +1,15 @@ +_base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py' + +model = dict( + backbone=dict(arch='X'), + neck=dict( + in_channels=[640, 1280, 1280], + out_channels=[160, 320, 640], + block_cfg=dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.4, + num_blocks=3, + num_convs_in_block=2), + use_repconv_outs=False), + bbox_head=dict(head_module=dict(in_channels=[320, 640, 1280]))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/README.md b/models/YOLO-World/third_party/mmyolo/configs/yolov8/README.md new file mode 100644 index 0000000000000000000000000000000000000000..766aa99163c97bff5206724febd41c3e484faa55 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/README.md @@ -0,0 +1,45 @@ +# YOLOv8 + + + +## Abstract + +Ultralytics YOLOv8, developed by Ultralytics, is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. + +
+ +YOLOv8 performance +
+ +
+ +YOLOv8-P5 model structure +
+ +## Results and models + +### COCO + +| Backbone | Arch | size | Mask Refine | SyncBN | AMP | Mem (GB) | box AP | TTA box AP | Config | Download | +| :------: | :--: | :--: | :---------: | :----: | :-: | :------: | :---------: | :--------: | :-------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOv8-n | P5 | 640 | No | Yes | Yes | 2.8 | 37.2 | | [config](./yolov8_n_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco/yolov8_n_syncbn_fast_8xb16-500e_coco_20230114_131804-88c11cdb.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco/yolov8_n_syncbn_fast_8xb16-500e_coco_20230114_131804.log.json) | +| YOLOv8-n | P5 | 640 | Yes | Yes | Yes | 2.5 | 37.4 (+0.2) | 39.9 | [config](./yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_101206-b975b1cd.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_101206.log.json) | +| YOLOv8-s | P5 | 640 | No | Yes | Yes | 4.0 | 44.2 | | [config](./yolov8_s_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco/yolov8_s_syncbn_fast_8xb16-500e_coco_20230117_180101-5aa5f0f1.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco/yolov8_s_syncbn_fast_8xb16-500e_coco_20230117_180101.log.json) | +| YOLOv8-s | P5 | 640 | Yes | Yes | Yes | 4.0 | 45.1 (+0.9) | 46.8 | [config](./yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_095938-ce3c1b3f.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_095938.log.json) | +| YOLOv8-m | P5 | 640 | No | Yes | Yes | 7.2 | 49.8 | | [config](./yolov8_m_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco/yolov8_m_syncbn_fast_8xb16-500e_coco_20230115_192200-c22e560a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco/yolov8_m_syncbn_fast_8xb16-500e_coco_20230115_192200.log.json) | +| YOLOv8-m | P5 | 640 | Yes | Yes | Yes | 7.0 | 50.6 (+0.8) | 52.3 | [config](./yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_223400-f40abfcd.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_223400.log.json) | +| YOLOv8-l | P5 | 640 | No | Yes | Yes | 9.8 | 52.1 | | 
[config](./yolov8_l_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco/yolov8_l_syncbn_fast_8xb16-500e_coco_20230217_182526-189611b6.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco/yolov8_l_syncbn_fast_8xb16-500e_coco_20230217_182526.log.json) |
+| YOLOv8-l | P5 | 640 | Yes | Yes | Yes | 9.1 | 53.0 (+0.9) | 54.4 | [config](./yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120100-5881dec4.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120100.log.json) |
+| YOLOv8-x | P5 | 640 | No | Yes | Yes | 12.2 | 52.7 | | [config](./yolov8_x_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco/yolov8_x_syncbn_fast_8xb16-500e_coco_20230218_023338-5674673c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco/yolov8_x_syncbn_fast_8xb16-500e_coco_20230218_023338.log.json) |
+| YOLOv8-x | P5 | 640 | Yes | Yes | Yes | 12.4 | 54.0 (+1.3) | 55.0 | [config](./yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120411-079ca8d1.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120411.log.json) |
+
+**Note**
+
+1. We use 8x A100 for training, and the single-GPU batch size is 16. This is different from the official code, but has no effect on performance.
+2. The performance is unstable and may fluctuate by about 0.3 mAP, and the highest-performing checkpoint of `YOLOv8` during `COCO` training may not be the one from the last epoch. The performance shown above is that of the best model.
+3. We provide [scripts](https://github.com/open-mmlab/mmyolo/tree/dev/tools/model_converters/yolov8_to_mmyolo.py) to convert official weights to MMYOLO.
+4. `SyncBN` means using SyncBN, and `AMP` indicates training with mixed precision.
+5. The performance of `Mask Refine` training corresponds to the performance of the weights officially released by YOLOv8. `Mask Refine` means refining the bboxes by masks while loading annotations and transforming after `YOLOv5RandomAffine`; the L and X models additionally use `Copy Paste`.
+6. `TTA` means Test Time Augmentation. It performs 3 multi-scale transformations on the image, followed by 2 flipping transformations (flipped and not flipped). You only need to specify `--tta` when testing to enable it. See [TTA](https://github.com/open-mmlab/mmyolo/blob/dev/docs/en/common_usage/tta.md) for details.
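+
+As a quick illustration of note 6 above (a sketch only, not actual mmyolo code; the scale values below are placeholders, the real ones come from the TTA config in use), the TTA views are the combinations of the multi-scale resizes with the two flip states, and TTA is enabled simply by passing `--tta` at test time:
+
+```python
+from itertools import product
+
+# Illustrative scales only; the configured TTA scales may differ.
+tta_scales = [(640, 640), (320, 320), (960, 960)]
+flip_states = [False, True]
+
+# 3 resize transforms x 2 flip transforms = 6 augmented views per image;
+# the detections from all views are merged before the final NMS step.
+tta_views = [dict(scale=s, flip=f) for s, f in product(tta_scales, flip_states)]
+assert len(tta_views) == 6
+```
+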
+ +## Citation diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/yolov8/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..33cd22bc69114f39c4b2a1fcaeabf5228534bb68 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/metafile.yml @@ -0,0 +1,140 @@ +Collections: + - Name: YOLOv8 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - AMP + - Synchronize BN + Training Resources: 8x A100 GPUs + Architecture: + - CSPDarkNet + - PAFPN + - Decoupled Head + README: configs/yolov8/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.0.1/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.0.1 + +Models: + - Name: yolov8_n_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 2.8 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.2 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco/yolov8_n_syncbn_fast_8xb16-500e_coco_20230114_131804-88c11cdb.pth + - Name: yolov8_s_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 4.0 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.2 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco/yolov8_s_syncbn_fast_8xb16-500e_coco_20230117_180101-5aa5f0f1.pth + - Name: yolov8_m_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 7.2 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.8 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco/yolov8_m_syncbn_fast_8xb16-500e_coco_20230115_192200-c22e560a.pth + - Name: yolov8_l_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 9.8 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco/yolov8_l_syncbn_fast_8xb16-500e_coco_20230217_182526-189611b6.pth + - Name: yolov8_x_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 12.2 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 52.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco/yolov8_x_syncbn_fast_8xb16-500e_coco_20230218_023338-5674673c.pth + - Name: yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 2.5 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.4 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_101206-b975b1cd.pth + - Name: yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: 
configs/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 4.0 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 45.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_095938-ce3c1b3f.pth + - Name: yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 7.0 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.6 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco_20230216_223400-f40abfcd.pth + - Name: yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 9.1 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 53.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120100-5881dec4.pth + - Name: yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco + In Collection: YOLOv8 + Config: configs/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py + Metadata: + Training Memory (GB): 12.4 + Epochs: 500 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 54.0 + Weights: https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco_20230217_120411-079ca8d1.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e25b6bcb63d1bad084f7c2175a6983dadb591fc4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,65 @@ +_base_ = './yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py' + +# This config use refining bbox and `YOLOv5CopyPaste`. 
+# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 1.00 +widen_factor = 1.00 +last_stage_out_channels = 512 + +mixup_prob = 0.15 +copypaste_prob = 0.3 + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +last_transform = _base_.last_transform +affine_scale = _base_.affine_scale + +model = dict( + backbone=dict( + last_stage_out_channels=last_stage_out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels], + out_channels=[256, 512, last_stage_out_channels]), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels]))) + +mosaic_affine_transform = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] + +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..bea8b2d56fecd46beddd0370732e8b83309528e5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_l_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,39 @@ +_base_ = './yolov8_m_syncbn_fast_8xb16-500e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 1.00 +widen_factor = 1.00 +last_stage_out_channels = 512 + +mixup_prob = 0.15 + +# =======================Unmodified in most cases================== +pre_transform = _base_.pre_transform +mosaic_affine_transform = _base_.mosaic_affine_transform +last_transform = _base_.last_transform + +model = dict( + backbone=dict( + last_stage_out_channels=last_stage_out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels], + out_channels=[256, 512, last_stage_out_channels]), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels]))) + +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py 
b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..2884daeb436e321c2c256687e0f063780d680f37 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_mask-refine_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,85 @@ +_base_ = './yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py' + +# This config use refining bbox and `YOLOv5CopyPaste`. +# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +last_stage_out_channels = 768 + +affine_scale = 0.9 +mixup_prob = 0.1 +copypaste_prob = 0.1 + +# ===============================Unmodified in most cases==================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +last_transform = _base_.last_transform + +model = dict( + backbone=dict( + last_stage_out_channels=last_stage_out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels], + out_channels=[256, 512, last_stage_out_channels]), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels]))) + +mosaic_affine_transform = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='YOLOv5CopyPaste', prob=copypaste_prob), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100., + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine) +] + +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=_base_.min_area_ratio, + use_mask_refine=_base_.use_mask2refine), *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +_base_.custom_hooks[1].switch_pipeline = train_pipeline_stage2 diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..840d32ccff78db31d9945bfe32531c1970845ee7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_m_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,76 @@ +_base_ = './yolov8_s_syncbn_fast_8xb16-500e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 +last_stage_out_channels = 768 + +affine_scale = 0.9 +mixup_prob = 0.1 + +# 
=======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +last_transform = _base_.last_transform + +model = dict( + backbone=dict( + last_stage_out_channels=last_stage_out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels], + out_channels=[256, 512, last_stage_out_channels]), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels]))) + +mosaic_affine_transform = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_aspect_ratio=100, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +# enable mixup +train_pipeline = [ + *pre_transform, *mosaic_affine_transform, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_transform]), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=100, + border_val=(114, 114, 114)), *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +_base_.custom_hooks[1].switch_pipeline = train_pipeline_stage2 diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..50d3774267fd89b747574f72b34e6d7d2237c5ef --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_mask-refine_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,12 @@ +_base_ = './yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +deepen_factor = 0.33 +widen_factor = 0.25 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..5833df3a157151bca2d2ce29380962e43f1ec876 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,9 @@ +_base_ = './yolov8_s_syncbn_fast_8xb16-500e_coco.py' + +deepen_factor = 0.33 +widen_factor = 0.25 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git 
a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_fast_1xb12-40e_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_fast_1xb12-40e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..e54bff03358c4138ea175187f6617735e80f185e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_fast_1xb12-40e_cat.py @@ -0,0 +1,52 @@ +_base_ = 'yolov8_s_syncbn_fast_8xb16-500e_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +close_mosaic_epochs = 5 + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco/yolov8_s_syncbn_fast_8xb16-500e_coco_20230117_180101-5aa5f0f1.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict(assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu +_base_.custom_hooks[1].switch_epoch = max_epochs - close_mosaic_epochs + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + # The warmup_mim_iter parameter is critical. + # The default value is 1000 which is not suitable for cat datasets. 
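+    # (Assumption: with a dataset this small an epoch amounts to only a few iterations,
+    # so a 1000-iteration warmup could span most of the 40-epoch schedule; 10 keeps the
+    # warmup short.)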
+ param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..769a698e4b52886797e08169cdc6da8eedea204d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_mask-refine_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,83 @@ +_base_ = './yolov8_s_syncbn_fast_8xb16-500e_coco.py' + +# This config will refine bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +# ========================modified parameters====================== +use_mask2refine = True +min_area_ratio = 0.01 # YOLOv5RandomAffine + +# ===============================Unmodified in most cases==================== +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='LoadAnnotations', + with_bbox=True, + with_mask=True, + mask2bbox=use_mask2refine) +] + +last_transform = [ + # Delete gt_masks to avoid more computation + dict(type='RemoveDataElement', keys=['gt_masks']), + dict( + type='mmdet.Albu', + transforms=_base_.albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='YOLOv5KeepRatioResize', scale=_base_.img_scale), + dict( + type='LetterResize', + scale=_base_.img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border_val=(114, 114, 114), + min_area_ratio=min_area_ratio, + use_mask_refine=use_mask2refine), *last_transform +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +_base_.custom_hooks[1].switch_pipeline = train_pipeline_stage2 diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..7e4127efbfd549803d8794b0bdf9fbcc9565e55c --- /dev/null +++ 
b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,334 @@ +_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# Path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of val image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 16 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 +# persistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper. Corresponding to 8xb16=64 bs +base_lr = 0.01 +max_epochs = 500 # Maximum training epochs +# Disable mosaic augmentation for final 10 epochs (stage 2) +close_mosaic_epochs = 10 + +model_test_cfg = dict( + # The config of multi-label for multi-class prediction. + multi_label=True, + # The number of boxes before NMS + nms_pre=30000, + score_thr=0.001, # Threshold to filter out boxes. + nms=dict(type='nms', iou_threshold=0.7), # NMS type and threshold + max_per_img=300) # Max number of detections of each image + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# Config of batch shapes. Only on val. +# We tested YOLOv8-m will get 0.02 higher than not using it. +batch_shapes_cfg = None +# You can turn on `batch_shapes_cfg` by uncommenting the following lines. +# batch_shapes_cfg = dict( +# type='BatchShapePolicy', +# batch_size=val_batch_size_per_gpu, +# img_size=img_scale[0], +# # The image scale of padding should be divided by pad_size_divisor +# size_divisor=32, +# # Additional paddings for pixel scale +# extra_pad_ratio=0.5) + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 +# Strides of multi-scale prior box +strides = [8, 16, 32] +# The output channel of the last stage +last_stage_out_channels = 1024 +num_det_layers = 3 # The number of model output scales +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) # Normalization config + +# -----train val related----- +affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio +# YOLOv5RandomAffine aspect ratio of width and height thres to filter bboxes +max_aspect_ratio = 100 +tal_topk = 10 # Number of bbox selected in each level +tal_alpha = 0.5 # A Hyper-parameter related to alignment_metrics +tal_beta = 6.0 # A Hyper-parameter related to alignment_metrics +# TODO: Automatically scale loss_weight based on number of detection layers +loss_cls_weight = 0.5 +loss_bbox_weight = 7.5 +# Since the dfloss is implemented differently in the official +# and mmdet, we're going to divide loss_weight by 4. 
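+# (i.e. the effective DFL loss weight used below is 1.5 / 4 = 0.375)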
+loss_dfl_weight = 1.5 / 4 +lr_factor = 0.01 # Learning rate scaling factor +weight_decay = 0.0005 +# Save model checkpoint and validation intervals in stage 1 +save_epoch_intervals = 10 +# validation intervals in stage 2 +val_interval_stage2 = 1 +# The maximum checkpoints to keep. +max_keep_ckpts = 2 +# Single-scale training is recommended to +# be turned on, which can speed up training. +env_cfg = dict(cudnn_benchmark=True) + +# ===============================Unmodified in most cases==================== +model = dict( + type='YOLODetector', + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True), + backbone=dict( + type='YOLOv8CSPDarknet', + arch='P5', + last_stage_out_channels=last_stage_out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + type='YOLOv8PAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, last_stage_out_channels], + out_channels=[256, 512, last_stage_out_channels], + num_csp_blocks=3, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOv8Head', + head_module=dict( + type='YOLOv8HeadModule', + num_classes=num_classes, + in_channels=[256, 512, last_stage_out_channels], + widen_factor=widen_factor, + reg_max=16, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + featmap_strides=strides), + prior_generator=dict( + type='mmdet.MlvlPointGenerator', offset=0.5, strides=strides), + bbox_coder=dict(type='DistancePointBBoxCoder'), + # scaled based on number of detection layers + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=loss_cls_weight), + loss_bbox=dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xyxy', + reduction='sum', + loss_weight=loss_bbox_weight, + return_iou=False), + loss_dfl=dict( + type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=loss_dfl_weight)), + train_cfg=dict( + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=num_classes, + use_ciou=True, + topk=tal_topk, + alpha=tal_alpha, + beta=tal_beta, + eps=1e-9)), + test_cfg=model_test_cfg) + +albu_train_transforms = [ + dict(type='Blur', p=0.01), + dict(type='MedianBlur', p=0.01), + dict(type='ToGray', p=0.01), + dict(type='CLAHE', p=0.01) +] + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +last_transform = [ + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=max_aspect_ratio, + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), + *last_transform +] + +train_pipeline_stage2 = [ + *pre_transform, + 
dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=True, + pad_val=dict(img=114.0)), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + max_aspect_ratio=max_aspect_ratio, + border_val=(114, 114, 114)), *last_transform +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='yolov5_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='YOLOv5KeepRatioResize', scale=img_scale), + dict( + type='LetterResize', + scale=img_scale, + allow_scale_up=False, + pad_val=dict(img=114)), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + test_mode=True, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file, + pipeline=test_pipeline, + batch_shapes_cfg=batch_shapes_cfg)) + +test_dataloader = val_dataloader + +param_scheduler = None +optim_wrapper = dict( + type='OptimWrapper', + clip_grad=dict(max_norm=10.0), + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.937, + weight_decay=weight_decay, + nesterov=True, + batch_size_per_gpu=train_batch_size_per_gpu), + constructor='YOLOv5OptimizerConstructor') + +default_hooks = dict( + param_scheduler=dict( + type='YOLOv5ParamSchedulerHook', + scheduler_type='linear', + lr_factor=lr_factor, + max_epochs=max_epochs), + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + save_best='auto', + max_keep_ckpts=max_keep_ckpts)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] + +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + val_interval_stage2)]) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8c27b9619d288f222ea0ce351f9e4578c31934a7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_mask-refine_syncbn_fast_8xb16-500e_coco.py @@ -0,0 
+1,13 @@ +_base_ = './yolov8_l_mask-refine_syncbn_fast_8xb16-500e_coco.py' + +# This config use refining bbox and `YOLOv5CopyPaste`. +# Refining bbox means refining bbox by mask while loading annotations and +# transforming after `YOLOv5RandomAffine` + +deepen_factor = 1.00 +widen_factor = 1.25 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..3d8e6653278db54745aa3a3a606bc63aa40328b7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolov8/yolov8_x_syncbn_fast_8xb16-500e_coco.py @@ -0,0 +1,9 @@ +_base_ = './yolov8_l_syncbn_fast_8xb16-500e_coco.py' + +deepen_factor = 1.00 +widen_factor = 1.25 + +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/README.md b/models/YOLO-World/third_party/mmyolo/configs/yolox/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7d5dc683c1b2e912ee27c7492bf7f869c103bb15 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/README.md @@ -0,0 +1,86 @@ +# YOLOX + +> [YOLOX: Exceeding YOLO Series in 2021](https://arxiv.org/abs/2107.08430) + + + +## Abstract + +In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. + +
+ +
+ +
+ +YOLOX-l model structure +
+ +## 🥳 🚀 Results and Models + +| Backbone | Size | Batch Size | AMP | RTMDet-Hyp | Mem (GB) | Box AP | Config | Download | +| :--------: | :--: | :--------: | :-: | :--------: | :------: | :---------: | :-------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOX-tiny | 416 | 8xb8 | No | No | 2.8 | 32.7 | [config](./yolox_tiny_fast_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_8xb8-300e_coco/yolox_tiny_8xb8-300e_coco_20220919_090908-0e40a6fc.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_8xb8-300e_coco/yolox_tiny_8xb8-300e_coco_20220919_090908.log.json) | +| YOLOX-tiny | 416 | 8xb32 | Yes | Yes | 4.9 | 34.3 (+1.6) | [config](./yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco_20230210_143637-4c338102.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco_20230210_143637.log.json) | +| YOLOX-s | 640 | 8xb8 | Yes | No | 2.9 | 40.7 | [config](./yolox_s_fast_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb8-300e_coco/yolox_s_fast_8xb8-300e_coco_20230213_142600-2b224d8b.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb8-300e_coco/yolox_s_fast_8xb8-300e_coco_20230213_142600.log.json) | +| YOLOX-s | 640 | 8xb32 | Yes | Yes | 9.8 | 41.9 (+1.2) | [config](./yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645.log.json) | +| YOLOX-m | 640 | 8xb8 | Yes | No | 4.9 | 46.9 | [config](./yolox_m_fast_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb8-300e_coco/yolox_m_fast_8xb8-300e_coco_20230213_160218-a71a6b25.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb8-300e_coco/yolox_m_fast_8xb8-300e_coco_20230213_160218.log.json) | +| YOLOX-m | 640 | 8xb32 | Yes | Yes | 17.6 | 47.5 (+0.6) | [config](./yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco_20230210_144328.log.json) | +| YOLOX-l | 640 | 8xb8 | Yes | No | 8.0 | 50.1 | [config](./yolox_l_fast_8xb8-300e_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_coco_20230213_160715-c731eb1c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_coco_20230213_160715.log.json) | +| YOLOX-x | 640 | 8xb8 | Yes | No | 9.8 | 51.4 | [config](./yolox_x_fast_8xb8-300e_coco.py) | 
[model](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_x_fast_8xb8-300e_coco/yolox_x_fast_8xb8-300e_coco_20230215_133950-1d509fab.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/yolox_x_fast_8xb8-300e_coco/yolox_x_fast_8xb8-300e_coco_20230215_133950.log.json) |
+
+YOLOX uses a default training configuration of `8xb8`, which results in a long training time; we expected that switching to `8xb32` would speed up training without causing a drop in mAP. We modified `train_batch_size_per_gpu` from 8 to 32, `batch_augments_interval` from 10 to 1, and `base_lr` from 0.01 to 0.04 under the YOLOX-s default configuration based on the linear scaling rule (sketched further below), but this resulted in mAP degradation. Finally, we found that using RTMDet's training hyperparameters improves performance for YOLOX Tiny/S/M, which also validates the effectiveness of RTMDet's training hyperparameters.
+
+The modified training parameters are as follows:
+
+1. train_batch_size_per_gpu: 8 -> 32
+2. batch_augments_interval: 10 -> 1
+3. num_last_epochs: 15 -> 20
+4. optim cfg: SGD -> AdamW, base_lr 0.01 -> 0.004, weight_decay 0.0005 -> 0.05
+5. ema momentum: 0.0001 -> 0.0002
+
+**Note**:
+
+1. The test score threshold is 0.001.
+2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information.
+
+## YOLOX-Pose
+
+Based on [MMPose](https://github.com/open-mmlab/mmpose/blob/main/projects/yolox-pose/README.md), we have implemented a YOLOX-based human pose estimator, utilizing the approach outlined in **YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss (CVPRW 2022)**. This pose estimator is lightweight and quick, making it well-suited for crowded scenes.
+
+
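+Returning to the batch-size change discussed above, here is a minimal sketch of the linear scaling rule arithmetic (illustrative only, not part of the released configs; the numbers are the ones quoted above, 8 GPUs with a per-GPU batch size going from 8 to 32):
+
+```python
+# Linear scaling rule sketch: the base learning rate grows with the total batch size.
+num_gpus = 8
+base_lr_8xb8 = 0.01  # base LR for 8 GPUs x batch 8 = 64 images per step
+
+def scaled_lr(base_lr, old_total_batch, new_total_batch):
+    return base_lr * new_total_batch / old_total_batch
+
+# 8 GPUs x batch 32 = 256 images per step -> 0.01 * 256 / 64 = 0.04, as stated above.
+print(scaled_lr(base_lr_8xb8, num_gpus * 8, num_gpus * 32))
+```
+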
+ +
+ +### Results + +| Backbone | Size | Batch Size | AMP | RTMDet-Hyp | Mem (GB) | AP | Config | Download | +| :--------: | :--: | :--------: | :-: | :--------: | :------: | :--: | :------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| YOLOX-tiny | 416 | 8xb32 | Yes | Yes | 5.3 | 52.8 | [config](./pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351-2117af67.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351.log.json) | +| YOLOX-s | 640 | 8xb32 | Yes | Yes | 10.7 | 63.7 | [config](./pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150-e87d843a.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150.log.json) | +| YOLOX-m | 640 | 8xb32 | Yes | Yes | 19.2 | 69.3 | [config](./pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024-bbeacc1c.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024.log.json) | +| YOLOX-l | 640 | 8xb32 | Yes | Yes | 30.3 | 71.1 | [config](./pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py) | [model](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140-82d65ac8.pth) \| [log](https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140.log.json) | + +**Note** + +1. The performance is unstable and may fluctuate and the highest performance weight in `COCO` training may not be the last epoch. The performance shown above is the best model. 
+ +### Installation + +Install MMPose + +``` +mim install -r requirements/mmpose.txt +``` + +## Citation + +```latex +@article{yolox2021, + title={{YOLOX}: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/metafile.yml b/models/YOLO-World/third_party/mmyolo/configs/yolox/metafile.yml new file mode 100644 index 0000000000000000000000000000000000000000..78ede704a629fa44957bc2b24e05e6559fc17710 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/metafile.yml @@ -0,0 +1,166 @@ +Collections: + - Name: YOLOX + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Nesterov + - Weight Decay + - Cosine Annealing Lr Updater + Training Resources: 8x A100 GPUs + Architecture: + - CSPDarkNet + - PAFPN + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'YOLOX: Exceeding YOLO Series in 2021' + README: configs/yolox/README.md + Code: + URL: https://github.com/open-mmlab/mmyolo/blob/v0.1.0/mmyolo/models/detectors/yolo_detector.py#L12 + Version: v0.1.0 + + +Models: + - Name: yolox_tiny_fast_8xb8-300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_tiny_fast_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 2.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 32.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_8xb8-300e_coco/yolox_tiny_8xb8-300e_coco_20220919_090908-0e40a6fc.pth + - Name: yolox_s_fast_8xb8-300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_s_fast_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 2.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb8-300e_coco/yolox_s_fast_8xb8-300e_coco_20230213_142600-2b224d8b.pth + - Name: yolox_m_fast_8xb8-300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_m_fast_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 4.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb8-300e_coco/yolox_m_fast_8xb8-300e_coco_20230213_160218-a71a6b25.pth + - Name: yolox_l_fast_8xb8-300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_l_fast_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 8.0 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_coco_20230213_160715-c731eb1c.pth + - Name: yolox_x_fast_8xb8-300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_x_fast_8xb8-300e_coco.py + Metadata: + Training Memory (GB): 9.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 51.4 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_x_fast_8xb8-300e_coco/yolox_x_fast_8xb8-300e_coco_20230215_133950-1d509fab.pth + - Name: yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: configs/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 4.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 34.3 + Weights: 
https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco_20230210_143637-4c338102.pth + - Name: yolox_s_fast_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: configs/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 9.8 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth + - Name: yolox_m_fast_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: configs/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 17.6 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth + - Name: yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 5.3 + Epochs: 300 + Results: + - Task: Human Pose Estimation + Dataset: COCO + Metrics: + AP: 52.8 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco_20230427_080351-2117af67.pth + - Name: yolox-pose_s_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 10.7 + Epochs: 300 + Results: + - Task: Human Pose Estimation + Dataset: COCO + Metrics: + AP: 63.7 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco_20230427_005150-e87d843a.pth + - Name: yolox-pose_m_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 19.2 + Epochs: 300 + Results: + - Task: Human Pose Estimation + Dataset: COCO + Metrics: + AP: 69.3 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco_20230427_094024-bbeacc1c.pth + - Name: yolox-pose_l_8xb32-300e-rtmdet-hyp_coco + In Collection: YOLOX + Config: yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py + Metadata: + Training Memory (GB): 30.3 + Epochs: 300 + Results: + - Task: Human Pose Estimation + Dataset: COCO + Metrics: + AP: 71.1 + Weights: https://download.openmmlab.com/mmyolo/v0/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco_20230427_041140-82d65ac8.pth diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..96de5e98183b33d6c19865547e7f7e217be31ea5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_l_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,14 @@ +_base_ = ['./yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py'] + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_l_fast_8xb8-300e_coco/yolox_l_fast_8xb8-300e_coco_20230213_160715-c731eb1c.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 
1.0 +widen_factor = 1.0 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..f78d6a3a2f8ce2828839073f1fe2582f49bb5a69 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_m_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,14 @@ +_base_ = ['./yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py'] + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco_20230210_144328-e657e182.pth' # noqa + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa2172c989ddfa6c6b28e33654e1c14b8cbbc91 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,136 @@ +_base_ = '../yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth' # noqa + +num_keypoints = 17 +scaling_ratio_range = (0.75, 1.0) +mixup_ratio_range = (0.8, 1.6) +num_last_epochs = 20 + +# model settings +model = dict( + bbox_head=dict( + type='YOLOXPoseHead', + head_module=dict( + type='YOLOXPoseHeadModule', + num_classes=1, + num_keypoints=num_keypoints, + ), + loss_pose=dict( + type='OksLoss', + metainfo='configs/_base_/pose/coco.py', + loss_weight=30.0)), + train_cfg=dict( + assigner=dict( + type='PoseSimOTAAssigner', + center_radius=2.5, + oks_weight=3.0, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + oks_calculator=dict( + type='OksLoss', metainfo='configs/_base_/pose/coco.py'))), + test_cfg=dict(score_thr=0.01)) + +# pipelines +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_keypoints=True) +] + +img_scale = _base_.img_scale + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='RandomAffine', + scaling_ratio_range=scaling_ratio_range, + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type='YOLOXMixUp', + img_scale=img_scale, + ratio_range=mixup_ratio_range, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='FilterAnnotations', by_keypoints=True, 
keep_empty=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='RandomFlip', prob=0.5), + dict(type='FilterAnnotations', by_keypoints=True, keep_empty=False), + dict(type='PackDetInputs') +] + +test_pipeline = [ + *pre_transform, + dict(type='Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='PackDetInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip_indices')) +] + +# dataset settings +dataset_type = 'PoseCocoDataset' + +train_dataloader = dict( + dataset=dict( + type=dataset_type, + data_mode='bottomup', + ann_file='annotations/person_keypoints_train2017.json', + pipeline=train_pipeline_stage1)) + +val_dataloader = dict( + dataset=dict( + type=dataset_type, + data_mode='bottomup', + ann_file='annotations/person_keypoints_val2017.json', + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# evaluators +val_evaluator = dict( + _delete_=True, + type='mmpose.CocoMetric', + ann_file=_base_.data_root + 'annotations/person_keypoints_val2017.json', + score_mode='bbox') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater')) + +visualizer = dict(type='mmpose.PoseLocalVisualizer') + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict(type='mmdet.SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a7399065e70f40f4142abc943b572cbd93954222 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/pose/yolox-pose_tiny_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,70 @@ +_base_ = './yolox-pose_s_8xb32-300e-rtmdet-hyp_coco.py' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco_20230210_143637-4c338102.pth' # noqa + +deepen_factor = 0.33 +widen_factor = 0.375 +scaling_ratio_range = (0.75, 1.0) + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=1) + ]), + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +# data settings +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='RandomAffine', + scaling_ratio_range=scaling_ratio_range, + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='mmdet.YOLOXHSVRandomAug'), + 
dict(type='RandomFlip', prob=0.5), + dict( + type='FilterAnnotations', + by_keypoints=True, + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +test_pipeline = [ + *pre_transform, + dict(type='Resize', scale=(416, 416), keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict( + type='PackDetInputs', + meta_keys=('id', 'img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip_indices')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline_stage1)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_l_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_l_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..39198d2e245b00445f0a5d38e41a1ffe389b17de --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_l_fast_8xb8-300e_coco.py @@ -0,0 +1,12 @@ +_base_ = './yolox_s_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 1.0 +widen_factor = 1.0 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4743c2dd4bcbe9e692aff54e3af1909d540c60 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,12 @@ +_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..ec8fd2c854bc2d41d53ba481fa3ad7f23ba3c54a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_m_fast_8xb8-300e_coco.py @@ -0,0 +1,12 @@ +_base_ = './yolox_s_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.67 +widen_factor = 0.75 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb32-300e-rtmdet-hyp_coco.py 
b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..851664fb3cb03dc24c4ea03e158b08db011684e9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.25 +use_depthwise = True + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + use_depthwise=use_depthwise), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + use_depthwise=use_depthwise), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, use_depthwise=use_depthwise))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a5d373856343af82259f9c165f851be49de16d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_nano_fast_8xb8-300e_coco.py @@ -0,0 +1,21 @@ +_base_ = './yolox_tiny_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.25 +use_depthwise = True + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + use_depthwise=use_depthwise), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + use_depthwise=use_depthwise), + bbox_head=dict( + head_module=dict( + widen_factor=widen_factor, use_depthwise=use_depthwise))) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_p5_tta.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_p5_tta.py new file mode 100644 index 0000000000000000000000000000000000000000..7ffe3490ca3f7f059d498201277f4df86fbcd3da --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_p5_tta.py @@ -0,0 +1,56 @@ +# TODO: Need to solve the problem of multiple backend_args parameters +# _backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +_backend_args = None + +tta_model = dict( + type='mmdet.DetTTAModel', + tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300)) + +img_scales = [(640, 640), (320, 320), (960, 960)] + +# LoadImageFromFile +# / | \ +# Resize Resize Resize # noqa +# / \ / \ / \ +# RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip # noqa +# | | | | | | +# LoadAnn LoadAnn LoadAnn LoadAnn LoadAnn LoadAnn +# | | | | | | +# PackDetIn PackDetIn PackDetIn PackDetIn PackDetIn PackDetIn # noqa + +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_backend_args), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='mmdet.Resize', scale=s, keep_ratio=True) + for s in img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) 
+ ], + [ + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + ], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_1xb12-40e-rtmdet-hyp_cat.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_1xb12-40e-rtmdet-hyp_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..f7eac58fb548a034e22acccef72a32951bb80dee --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_1xb12-40e-rtmdet-hyp_cat.py @@ -0,0 +1,76 @@ +_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py' + +data_root = './data/cat/' +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(20, 220, 60)]) + +num_last_epochs = 5 + +max_epochs = 40 +train_batch_size_per_gpu = 12 +train_num_workers = 4 + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth' # noqa + +model = dict( + backbone=dict(frozen_stages=4), + bbox_head=dict(head_module=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/test.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +param_scheduler = [ + dict( + # use quadratic formula to warm up 3 epochs + # and lr is updated by iteration + # TODO: fix default scope in get function + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=3, + convert_to_iter_based=True), + dict( + # use cosine lr from 5 to 35 epoch + type='CosineAnnealingLR', + eta_min=_base_.base_lr * 0.05, + begin=5, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last num_last_epochs epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +_base_.custom_hooks[0].num_last_epochs = num_last_epochs + +val_evaluator = dict(ann_file=data_root + 'annotations/test.json') +test_evaluator = val_evaluator + +default_hooks = dict( + checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'), + logger=dict(type='LoggerHook', interval=5)) +train_cfg = dict(max_epochs=max_epochs, val_interval=10) +# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..167023da94815e13a782b85209e1116aeac7803d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,87 @@ +_base_ = './yolox_s_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +# Batch size of a single GPU during training +# 8 -> 32 +train_batch_size_per_gpu = 32 + +# Multi-scale training intervals +# 10 -> 
1 +batch_augments_interval = 1 + +# Last epoch number to switch training pipeline +# 15 -> 20 +num_last_epochs = 20 + +# Base learning rate for optim_wrapper. Corresponding to 8xb32=256 bs +base_lr = 0.004 + +# SGD -> AdamW +optim_wrapper = dict( + _delete_=True, + type='OptimWrapper', + optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) + +# 0.0001 -> 0.0002 +ema_momentum = 0.0002 + +# ============================== Unmodified in most cases =================== +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=batch_augments_interval) + ])) + +param_scheduler = [ + dict( + # use quadratic formula to warm up 5 epochs + # and lr is updated by iteration + # TODO: fix default scope in get function + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + # use cosine lr from 5 to 285 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=5, + T_max=_base_.max_epochs - num_last_epochs, + end=_base_.max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last num_last_epochs epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=_base_.max_epochs - num_last_epochs, + end=_base_.max_epochs, + ) +] + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + new_train_pipeline=_base_.train_pipeline_stage2, + priority=48), + dict(type='mmdet.SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=ema_momentum, + update_buffers=True, + strict_load=False, + priority=49) +] + +train_dataloader = dict(batch_size=train_batch_size_per_gpu) +train_cfg = dict(dynamic_intervals=[(_base_.max_epochs - num_last_epochs, 1)]) +auto_scale_lr = dict(base_batch_size=8 * train_batch_size_per_gpu) diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b371ea11d2dd0900476d88a9de626e881297d790 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_s_fast_8xb8-300e_coco.py @@ -0,0 +1,331 @@ +_base_ = ['../_base_/default_runtime.py', 'yolox_p5_tta.py'] + +# ========================Frequently modified parameters====================== +# -----data related----- +data_root = 'data/coco/' # Root path of data +# path of train annotation file +train_ann_file = 'annotations/instances_train2017.json' +train_data_prefix = 'train2017/' # Prefix of train image path +# path of val annotation file +val_ann_file = 'annotations/instances_val2017.json' +val_data_prefix = 'val2017/' # Prefix of train image path + +num_classes = 80 # Number of classes for classification +# Batch size of a single GPU during training +train_batch_size_per_gpu = 8 +# Worker to pre-fetch data for each single GPU during tarining +train_num_workers = 8 +# Presistent_workers must be False if num_workers is 0 +persistent_workers = True + +# -----train val related----- +# Base learning rate for optim_wrapper. 
Corresponding to 8xb16=64 bs +base_lr = 0.01 +max_epochs = 300 # Maximum training epochs + +model_test_cfg = dict( + yolox_style=True, # better + # The config of multi-label for multi-class prediction + multi_label=True, # 40.5 -> 40.7 + score_thr=0.001, # Threshold to filter out boxes + max_per_img=300, # Max number of detections of each image + nms=dict(type='nms', iou_threshold=0.65)) # NMS type and threshold + +# ========================Possible modified parameters======================== +# -----data related----- +img_scale = (640, 640) # width, height +# Dataset type, this will be used to define the dataset +dataset_type = 'YOLOv5CocoDataset' +# Batch size of a single GPU during validation +val_batch_size_per_gpu = 1 +# Worker to pre-fetch data for each single GPU during validation +val_num_workers = 2 + +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.33 +# The scaling factor that controls the width of the network structure +widen_factor = 0.5 +norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) +# generate new random resize shape interval +batch_augments_interval = 10 + +# -----train val related----- +weight_decay = 0.0005 +loss_cls_weight = 1.0 +loss_bbox_weight = 5.0 +loss_obj_weight = 1.0 +loss_bbox_aux_weight = 1.0 +center_radius = 2.5 # SimOTAAssigner +num_last_epochs = 15 +random_affine_scaling_ratio_range = (0.1, 2) +mixup_ratio_range = (0.8, 1.6) +# Save model checkpoint and validation intervals +save_epoch_intervals = 10 +# The maximum checkpoints to keep. +max_keep_ckpts = 3 + +ema_momentum = 0.0001 + +# ===============================Unmodified in most cases==================== +# model settings +model = dict( + type='YOLODetector', + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=2.23606797749979, # math.sqrt(5) + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu'), + # TODO: Waiting for mmengine support + use_syncbn=False, + data_preprocessor=dict( + type='YOLOv5DetDataPreprocessor', + pad_size_divisor=32, + batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(480, 800), + size_divisor=32, + interval=batch_augments_interval) + ]), + backbone=dict( + type='YOLOXCSPDarknet', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + out_indices=(2, 3, 4), + spp_kernal_sizes=(5, 9, 13), + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + ), + neck=dict( + type='YOLOXPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + in_channels=[256, 512, 1024], + out_channels=256, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict( + type='YOLOXHead', + head_module=dict( + type='YOLOXHeadModule', + num_classes=num_classes, + in_channels=256, + feat_channels=256, + widen_factor=widen_factor, + stacked_convs=2, + featmap_strides=(8, 16, 32), + use_depthwise=False, + norm_cfg=norm_cfg, + act_cfg=dict(type='SiLU', inplace=True), + ), + loss_cls=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=loss_cls_weight), + loss_bbox=dict( + type='mmdet.IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=loss_bbox_weight), + loss_obj=dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=loss_obj_weight), + loss_bbox_aux=dict( + type='mmdet.L1Loss', + reduction='sum', + loss_weight=loss_bbox_aux_weight)), + train_cfg=dict( + assigner=dict( + type='mmdet.SimOTAAssigner', + center_radius=center_radius, + 
iou_calculator=dict(type='mmdet.BboxOverlaps2D'))), + test_cfg=model_test_cfg) + +pre_transform = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True) +] + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='mmdet.RandomAffine', + scaling_ratio_range=random_affine_scaling_ratio_range, + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict( + type='YOLOXMixUp', + img_scale=img_scale, + ratio_range=mixup_ratio_range, + pad_val=114.0, + pre_transform=pre_transform), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_pipeline_stage2 = [ + *pre_transform, + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + # If the image is three-channel, the pad value needs + # to be set separately for each channel. + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + collate_fn=dict(type='yolov5_collate'), + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline_stage1)) + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=val_num_workers, + persistent_workers=persistent_workers, + pin_memory=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=val_ann_file, + data_prefix=dict(img=val_data_prefix), + test_mode=True, + pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# Reduce evaluation time +val_evaluator = dict( + type='mmdet.CocoMetric', + proposal_nums=(100, 1, 10), + ann_file=data_root + val_ann_file, + metric='bbox') + +test_evaluator = val_evaluator + +# optimizer +# default 8 gpu +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=base_lr, + momentum=0.9, + weight_decay=weight_decay, + nesterov=True), + paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) + +# learning rate +param_scheduler = [ + dict( + # use quadratic formula to warm up 5 epochs + # and lr is updated by iteration + # TODO: fix default scope in get function + type='mmdet.QuadraticWarmupLR', + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( 
+ # use cosine lr from 5 to 285 epoch + type='CosineAnnealingLR', + eta_min=base_lr * 0.05, + begin=5, + T_max=max_epochs - num_last_epochs, + end=max_epochs - num_last_epochs, + by_epoch=True, + convert_to_iter_based=True), + dict( + # use fixed lr during last 15 epochs + type='ConstantLR', + by_epoch=True, + factor=1, + begin=max_epochs - num_last_epochs, + end=max_epochs, + ) +] + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto')) + +custom_hooks = [ + dict( + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, + new_train_pipeline=train_pipeline_stage2, + priority=48), + dict(type='mmdet.SyncNormHook', priority=48), + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=ema_momentum, + update_buffers=True, + strict_load=False, + priority=49) +] + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)]) + +auto_scale_lr = dict(base_batch_size=8 * train_batch_size_per_gpu) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..28e539c9472d20fe2e28b49659ec523c098bb170 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py @@ -0,0 +1,70 @@ +_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.375 + +# Multi-scale training intervals +# 10 -> 1 +batch_augments_interval = 1 + +scaling_ratio_range = (0.5, 1.5) + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=batch_augments_interval) + ]), + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='mmdet.RandomAffine', + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=(416, 416), keep_ratio=True), # note + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + 
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline_stage1)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..fd175a6c73ccc55df697ccbf04dfb46a3fbdc0ee --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_tiny_fast_8xb8-300e_coco.py @@ -0,0 +1,100 @@ +_base_ = './yolox_s_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 0.33 +widen_factor = 0.375 +scaling_ratio_range = (0.5, 1.5) + +# =======================Unmodified in most cases================== +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform + +test_img_scale = (416, 416) +tta_img_scales = [test_img_scale, (320, 320), (640, 640)] + +# model settings +model = dict( + data_preprocessor=dict(batch_augments=[ + dict( + type='YOLOXBatchSyncRandomResize', + random_size_range=(320, 640), + size_divisor=32, + interval=10) + ]), + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_pipeline_stage1 = [ + *pre_transform, + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='mmdet.RandomAffine', + scaling_ratio_range=scaling_ratio_range, # note + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2)), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.FilterAnnotations', + min_gt_bbox_wh=(1, 1), + keep_empty=False), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +test_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='mmdet.Resize', scale=test_img_scale, keep_ratio=True), # note + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline_stage1)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# Config for Test Time Augmentation. (TTA) +tta_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='mmdet.Resize', scale=s, keep_ratio=True) + for s in tta_img_scales + ], + [ + # ``RandomFlip`` must be placed before ``Pad``, otherwise + # bounding box coordinates after flipping cannot be + # recovered correctly. + dict(type='mmdet.RandomFlip', prob=1.), + dict(type='mmdet.RandomFlip', prob=0.) 
+ ], + [ + dict( + type='mmdet.Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + ], + [ + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction')) + ] + ]) +] diff --git a/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_x_fast_8xb8-300e_coco.py b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_x_fast_8xb8-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..0759d468be70f9af026fef2ae0dbf2308082ad96 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/configs/yolox/yolox_x_fast_8xb8-300e_coco.py @@ -0,0 +1,12 @@ +_base_ = './yolox_s_fast_8xb8-300e_coco.py' + +# ========================modified parameters====================== +deepen_factor = 1.33 +widen_factor = 1.25 + +# =======================Unmodified in most cases================== +# model settings +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/demo/15_minutes_instance_segmentation.ipynb b/models/YOLO-World/third_party/mmyolo/demo/15_minutes_instance_segmentation.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a09a1a10512c15abd611c35cefdfbeda64090268 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/15_minutes_instance_segmentation.ipynb @@ -0,0 +1,658 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "x7seefPduh36" + }, + "source": [ + "
\n", + " \n", + "
 
\n", + "
\n", + " OpenMMLab website\n", + " \n", + " \n", + " HOT\n", + " \n", + " \n", + "     \n", + " OpenMMLab platform\n", + " \n", + " \n", + " TRY IT OUT\n", + " \n", + " \n", + "
\n", + "
 
\n", + "\n", + "\"Open\n", + "\n", + "[![PyPI](https://img.shields.io/pypi/v/mmyolo)](https://pypi.org/project/mmyolo)\n", + "[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmyolo.readthedocs.io/en/latest/)\n", + "[![deploy](https://github.com/open-mmlab/mmyolo/workflows/deploy/badge.svg)](https://github.com/open-mmlab/mmyolo/actions)\n", + "[![codecov](https://codecov.io/gh/open-mmlab/mmyolo/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmyolo)\n", + "[![license](https://img.shields.io/github/license/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/blob/main/LICENSE)\n", + "[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "\n", + "[📘Documentation](https://mmyolo.readthedocs.io/en/latest/) |\n", + "[🛠️Installation](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html) |\n", + "[👀Model Zoo](https://mmyolo.readthedocs.io/en/latest/model_zoo.html) |\n", + "[🆕Update News](https://mmyolo.readthedocs.io/en/latest/notes/changelog.html) |\n", + "[🤔Reporting Issues](https://github.com/open-mmlab/mmyolo/issues/new/choose)\n", + "\n", + "
\n", + "\n", + "
\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + "
" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "V6W8P5XEJGoc" + }, + "source": [ + "# 15 minutes to get started with MMYOLO instance segmentation\n", + "\n", + "Instance segmentation is a task in computer vision that aims to segment each object in an image and assign each object a unique identifier.\n", + "\n", + "Unlike semantic segmentation, instance segmentation not only segments out different categories in an image, but also separates different instances of the same category.\n", + "\n", + "
\n", + "\"Instance\n", + "
\n", + "\n", + "Taking the downloadable balloon dataset as an example, I will guide you through a 15-minute easy introduction to MMYOLO instance segmentation. The entire process includes the following steps:\n", + "\n", + "- [Installation](#installation)\n", + "- [Dataset](#dataset)\n", + "- [Config](#config)\n", + "- [Training](#training)\n", + "- [Testing](#testing)\n", + "- [EasyDeploy](#easydeploy-deployment)\n", + "\n", + "In this tutorial, we will use YOLOv5-s as an example. For the demo configuration of the balloon dataset with other YOLO series algorithms, please refer to the corresponding algorithm configuration folder." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Ae5SqsA7wYGQ" + }, + "source": [ + "## Installation\n", + "\n", + "Assuming you've already installed Conda in advance, then install PyTorch using the following commands." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XVLRaEIzwW-6", + "outputId": "901b5db6-b1d7-4830-e746-485ee76d6648" + }, + "outputs": [], + "source": [ + "# -----------------------------------------------------------------------------------------\n", + "# If you are using colab, you can skip this cell for PyTorch is pre-installed on the colab.\n", + "# -----------------------------------------------------------------------------------------\n", + "!python -V\n", + "# Check nvcc version\n", + "!nvcc -V\n", + "# Check GCC version\n", + "!gcc --version\n", + "# Create a new Conda environment\n", + "%conda create -n mmyolo python=3.8 -y\n", + "%conda activate mmyolo\n", + "# If you have GPU\n", + "%conda install pytorch torchvision -c pytorch\n", + "# If you only have CPU\n", + "# %conda install pytorch torchvision cpuonly -c pytorch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check PyTorch version\n", + "import torch\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install MMYOLO and dependency libraries using the following commands.\n", + "For details about how to configure the environment, see [Installation and verification](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html).\n", + "```{note}\n", + "Note: Since this repo uses OpenMMLab 2.0, it is better to create a new conda virtual environment to prevent conflicts with the repo installed in OpenMMLab 1.0.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "-qATUuntwmfD", + "outputId": "24be577b-efce-46f2-8b2f-a65d02824467" + }, + "outputs": [], + "source": [ + "!git clone https://github.com/open-mmlab/mmyolo.git\n", + "%cd mmyolo\n", + "%pip install -U openmim\n", + "!mim install -r requirements/mminstall.txt\n", + "# Install albumentations\n", + "!mim install -r requirements/albu.txt\n", + "# Install MMYOLO\n", + "!mim install -v -e .\n", + "# \"-v\" means verbose, or more output\n", + "# \"-e\" means installing a project in editable mode,\n", + "# thus any local modifications made to the code will take effect without reinstallation." 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dataset\n", + "\n", + "The Balloon dataset is a single-class dataset that consists of 74 images and includes annotated information required for training. Here is an example image from the dataset:\n", + "\n", + "
\n", + "\"balloon\n", + "
\n", + "\n", + "You can download and use it directly by the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gMQXwWuIw3ef", + "outputId": "c8efeac7-5b0c-4342-b5af-d3e790e358c3" + }, + "outputs": [], + "source": [ + "!python tools/misc/download_dataset.py --dataset-name balloon --save-dir ./data/balloon --unzip --delete\n", + "!python ./tools/dataset_converters/balloon2coco.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "covQskXXw2ul" + }, + "source": [ + "The data for the MMYOLO project is located in the MMYOLO project directory. The `train.json` and `val.json` files store the annotations in COCO format, while the `data/balloon/train` and `data/balloon/val` directories contain all the images for the dataset.\n", + "\n", + "## Config\n", + "\n", + "Taking YOLOv5 algorithm as an example, considering the limited GPU memory of users, we need to modify some default training parameters to make them run smoothly. The key parameters to be modified are as follows:\n", + "\n", + "- YOLOv5 is an Anchor-Based algorithm, and different datasets need to calculate suitable anchors adaptively.\n", + "- The default config uses 8 GPUs with a batch size of 16 per GPU. Now change it to a single GPU with a batch size of 12.\n", + "- In principle, the learning rate should be linearly scaled accordingly when the batch size is changed, but actual measurements have found that this is not necessary.\n", + "\n", + "To perform the specific operation, create a new configuration file named `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` in the `configs/yolov5/ins_seg` folder. For convenience, we have already provided this configuration file. 
Copy the following contents into the configuration file.\n", + "\n", + "```python\n", + "_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa\n", + "\n", + "data_root = 'data/balloon/' # dataset root\n", + "# Training set annotation file of json path\n", + "train_ann_file = 'train.json'\n", + "train_data_prefix = 'train/' # Dataset prefix\n", + "# Validation set annotation file of json path\n", + "val_ann_file = 'val.json'\n", + "val_data_prefix = 'val/'\n", + "metainfo = {\n", + " 'classes': ('balloon', ), # dataset category name\n", + " 'palette': [\n", + " (220, 20, 60),\n", + " ]\n", + "}\n", + "num_classes = 1\n", + "# Set batch size to 4\n", + "train_batch_size_per_gpu = 4\n", + "# dataloader num workers\n", + "train_num_workers = 2\n", + "log_interval = 1\n", + "#####################\n", + "train_dataloader = dict(\n", + " batch_size=train_batch_size_per_gpu,\n", + " num_workers=train_num_workers,\n", + " dataset=dict(\n", + " data_root=data_root,\n", + " metainfo=metainfo,\n", + " data_prefix=dict(img=train_data_prefix),\n", + " ann_file=train_ann_file))\n", + "val_dataloader = dict(\n", + " dataset=dict(\n", + " data_root=data_root,\n", + " metainfo=metainfo,\n", + " data_prefix=dict(img=val_data_prefix),\n", + " ann_file=val_ann_file))\n", + "test_dataloader = val_dataloader\n", + "val_evaluator = dict(ann_file=data_root + val_ann_file)\n", + "test_evaluator = val_evaluator\n", + "default_hooks = dict(logger=dict(interval=log_interval))\n", + "#####################\n", + "\n", + "model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))\n", + "```\n", + "\n", + "The above configuration inherits from `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py` and updates configurations such as `data_root`, `metainfo`, `train_dataloader`, `val_dataloader`, `num_classes`, etc., based on the characteristics of the balloon dataset.\n", + "\n", + "## Training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "TQ0h6sv_rJxq" + }, + "source": [ + "After running the training command mentioned above, the folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance` will be automatically generated. The weight files and the training configuration file for this session will be saved in this folder. On a lower-end GPU like the GTX 1660, the entire training process will take approximately 30 minutes.\n", + "\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The performance on `val.json` is as follows:\n", + "\n", + "```text\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.330\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.509\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.317\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.103\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.417\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.150\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.396\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.454\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.317\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.525\n", + "```\n", + "\n", + "The above performance is obtained by printing using the COCO API, where -1 indicates the absence of objects of that scale.\n", + "\n", + "### Some Notes\n", + "\n", + "Two key warnings are printed during training:\n", + "\n", + "- You are using `YOLOv5Head` with num_classes == 1. The loss_cls will be 0. This is a normal phenomenon.\n", + "\n", + "The warning is because the `num_classes` currently trained is 1, the loss of the classification branch is always 0 according to the community of the YOLOv5 algorithm, which is a normal phenomenon.\n", + "\n", + "### Training is resumed after the interruption\n", + "\n", + "If you stop training, you can add `--resume` to the end of the training command and the program will automatically resume training with the latest weights file from `work_dirs`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --resume" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "3sJxvQoUrMhX" + }, + "source": [ + "### Save GPU memory strategy\n", + "\n", + "The above config requires about 3G RAM, so if you don't have enough, consider turning on mixed-precision training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --amp" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "jVJdyHTxrQ9a" + }, + "source": [ + "### Training visualization\n", + "\n", + "MMYOLO currently supports local, TensorBoard, WandB and other back-end visualization. The default is to use local visualization, and you can switch to WandB and other real-time visualization of various indicators in the training process.\n", + "\n", + "#### 1 WandB\n", + "\n", + "WandB visualization need registered in website, and in the https://wandb.ai/settings for wandb API Keys.\n", + "\n", + "
\n", + "\"image\"/\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install wandb\n", + "# After running wandb login, enter the API Keys obtained above, and the login is successful.\n", + "!wandb login" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Yu0_4YYRrbyY" + }, + "source": [ + "Add the wandb config at the end of config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])\n", + "```\n", + "\n", + "Running the training command and you will see the loss, learning rate, and coco/bbox_mAP visualizations in the link." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "f_DyzfDIzwMa" + }, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "#### 2 Tensorboard\n", + "\n", + "Install Tensorboard using the following command." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "gHkGlii3n29Q" + }, + "outputs": [], + "source": [ + "%pip install tensorboard" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "bE-nx9TY1P-M" + }, + "source": [ + "Add the `tensorboard` config at the end of config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])\n", + "```\n", + "\n", + "After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/vis_data`.\n", + "We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "g8fZgokho5CE" + }, + "outputs": [], + "source": [ + "!tensorboard --logdir=work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "GUZ7MPoaro-o" + }, + "source": [ + "## Testing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VYmxtE0GunTB", + "outputId": "f440807c-1931-4810-b76d-617f73fde227" + }, + "outputs": [], + "source": [ + "!python tools/test.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance best_coco_bbox_mAP_epoch_300.pth --show-dir show_results" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "_cFocUqN0BCb" + }, + "source": [ + "Run the above test command, you can not only get the AP performance printed in the **Training** section, You can also automatically save the result images to the `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/show_results` folder. Below is one of the result images, the left image is the actual annotation, and the right image is the inference result of the model.\n", + "\n", + "
\n", + "\"result_img\"/\n", + "
\n", + "\n", + "You can also visualize model inference results in a browser window if you use `WandbVisBackend` or `TensorboardVisBackend`.\n", + "\n", + "## Feature map visualization\n", + "\n", + "MMYOLO provides visualization scripts for feature map to analyze the current model training. Please refer to [Feature Map Visualization](../recommended_topics/visualization.md)\n", + "\n", + "Due to the bias of direct visualization of `test_pipeline`, we need to modify the `test_pipeline` of `configs/yolov5/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='YOLOv5KeepRatioResize', scale=img_scale),\n", + " dict(\n", + " type='LetterResize',\n", + " scale=img_scale,\n", + " allow_scale_up=False,\n", + " pad_val=dict(img=114)),\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor', 'pad_param'))\n", + "]\n", + "```\n", + "\n", + "to the following config:\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='mmdet.Resize', scale=img_scale, keep_ratio=False), # modify the LetterResize to mmdet.Resize\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor'))\n", + "]\n", + "```\n", + "\n", + "Let's choose the `data/balloon/train/3927754171_9011487133_b.jpg` image as an example to visualize the output feature maps of YOLOv5 backbone and neck layers.\n", + "\n", + "**1. Visualize the three channels of YOLOv5 backbone**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg onfigs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth --target-layers backbone --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The result will be saved to the output folder in current path. Three output feature maps plotted in the above figure correspond to small, medium and large output feature maps.\n", + "\n", + "**2. Visualize the three channels of YOLOv5 neck**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \\\n", + " configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \\\n", + " work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \\\n", + " --target-layers neck \\\n", + " --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "**3. Grad-Based CAM visualization**\n", + "TODO" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## EasyDeploy deployment\n", + "TODO\n", + "\n", + "This completes the transformation deployment of the trained model and checks the inference results. This is the end of the tutorial.\n", + "\n", + "If you encounter problems during training or testing, please check the [common troubleshooting steps](https://mmyolo.readthedocs.io/en/dev/recommended_topics/troubleshooting_steps.html) first and feel free to open an [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose) if you still can't solve it." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [], + "toc_visible": true + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/models/YOLO-World/third_party/mmyolo/demo/15_minutes_object_detection.ipynb b/models/YOLO-World/third_party/mmyolo/demo/15_minutes_object_detection.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..47e0ccbd803c808982b2a30d55b640f0b1bd48da --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/15_minutes_object_detection.ipynb @@ -0,0 +1,1002 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "x7seefPduh36" + }, + "source": [ + "
\n", + " \n", + "
 
\n", + "
\n", + " OpenMMLab website\n", + " \n", + " \n", + " HOT\n", + " \n", + " \n", + "     \n", + " OpenMMLab platform\n", + " \n", + " \n", + " TRY IT OUT\n", + " \n", + " \n", + "
\n", + "
 
\n", + "\n", + "\"Open\n", + "\n", + "[![PyPI](https://img.shields.io/pypi/v/mmyolo)](https://pypi.org/project/mmyolo)\n", + "[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmyolo.readthedocs.io/en/latest/)\n", + "[![deploy](https://github.com/open-mmlab/mmyolo/workflows/deploy/badge.svg)](https://github.com/open-mmlab/mmyolo/actions)\n", + "[![codecov](https://codecov.io/gh/open-mmlab/mmyolo/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmyolo)\n", + "[![license](https://img.shields.io/github/license/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/blob/main/LICENSE)\n", + "[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "\n", + "[📘Documentation](https://mmyolo.readthedocs.io/en/latest/) |\n", + "[🛠️Installation](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html) |\n", + "[👀Model Zoo](https://mmyolo.readthedocs.io/en/latest/model_zoo.html) |\n", + "[🆕Update News](https://mmyolo.readthedocs.io/en/latest/notes/changelog.html) |\n", + "[🤔Reporting Issues](https://github.com/open-mmlab/mmyolo/issues/new/choose)\n", + "\n", + "
\n", + "\n", + "
\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + "
" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "V6W8P5XEJGoc" + }, + "source": [ + "# 15 minutes to get started with MMYOLO object detection\n", + "\n", + "Object detection task refers to that given a picture, the network predicts all the categories of objects included in the picture and the corresponding boundary boxes\n", + "\n", + "
\n", + "\"object\n", + "
\n", + "\n", + "Take the small dataset of cat as an example, you can easily learn MMYOLO object detection in 15 minutes. The whole process consists of the following steps:\n", + "\n", + "- [Installation](#installation)\n", + "- [Dataset](#dataset)\n", + "- [Config](#config)\n", + "- [Training](#training)\n", + "- [Testing](#testing)\n", + "- [EasyDeploy](#easydeploy-deployment)\n", + "\n", + "In this tutorial, we take YOLOv5-s as an example. For the rest of the YOLO series algorithms, please see the corresponding algorithm configuration folder." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Ae5SqsA7wYGQ" + }, + "source": [ + "## Installation\n", + "\n", + "Assuming you've already installed Conda in advance, then install PyTorch using the following commands." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XVLRaEIzwW-6", + "outputId": "901b5db6-b1d7-4830-e746-485ee76d6648" + }, + "outputs": [], + "source": [ + "# -----------------------------------------------------------------------------------------\n", + "# If you are using colab, you can skip this cell for PyTorch is pre-installed on the colab.\n", + "# -----------------------------------------------------------------------------------------\n", + "!python -V\n", + "# Check nvcc version\n", + "!nvcc -V\n", + "# Check GCC version\n", + "!gcc --version\n", + "# Create a new Conda environment\n", + "%conda create -n mmyolo python=3.8 -y\n", + "%conda activate mmyolo\n", + "# If you have GPU\n", + "%conda install pytorch torchvision -c pytorch\n", + "# If you only have CPU\n", + "# %conda install pytorch torchvision cpuonly -c pytorch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check PyTorch version\n", + "import torch\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install MMYOLO and dependency libraries using the following commands.\n", + "For details about how to configure the environment, see [Installation and verification](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html).\n", + "```{note}\n", + "Note: Since this repo uses OpenMMLab 2.0, it is better to create a new conda virtual environment to prevent conflicts with the repo installed in OpenMMLab 1.0.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "-qATUuntwmfD", + "outputId": "24be577b-efce-46f2-8b2f-a65d02824467" + }, + "outputs": [], + "source": [ + "!git clone https://github.com/open-mmlab/mmyolo.git\n", + "%cd mmyolo\n", + "%pip install -U openmim\n", + "!mim install -r requirements/mminstall.txt\n", + "# Install albumentations\n", + "!mim install -r requirements/albu.txt\n", + "# Install MMYOLO\n", + "!mim install -v -e .\n", + "# \"-v\" means verbose, or more output\n", + "# \"-e\" means installing a project in editable mode,\n", + "# thus any local modifications made to the code will take effect without reinstallation." 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dataset\n", + "\n", + "The Cat dataset is a single-category dataset consisting of 144 pictures (the original pictures are provided by @RangeKing, and cleaned by @PeterH0323), which contains the annotation information required for training. The sample image is shown below:\n", + "\n", + "
\n", + "\"cat\n", + "
\n", + "\n", + "You can download and use it directly by the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gMQXwWuIw3ef", + "outputId": "c8efeac7-5b0c-4342-b5af-d3e790e358c3" + }, + "outputs": [], + "source": [ + "!python tools/misc/download_dataset.py --dataset-name cat --save-dir ./data/cat --unzip --delete" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "covQskXXw2ul" + }, + "source": [ + "This dataset is automatically downloaded to the `./data/cat` dir with the following directory structure:\n", + "\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The cat dataset is located in the mmyolo project dir, and `data/cat/annotations` stores annotations in COCO format, and `data/cat/images` stores all images\n", + "\n", + "## Config\n", + "\n", + "Taking YOLOv5 algorithm as an example, considering the limited GPU memory of users, we need to modify some default training parameters to make them run smoothly. The key parameters to be modified are as follows:\n", + "\n", + "- YOLOv5 is an Anchor-Based algorithm, and different datasets need to calculate suitable anchors adaptively\n", + "- The default config uses 8 GPUs with a batch size of 16 per GPU. Now change it to a single GPU with a batch size of 12.\n", + "- The default training epoch is 300. Change it to 40 epoch\n", + "- Given the small size of the dataset, we opted to use fixed backbone weights\n", + "- In principle, the learning rate should be linearly scaled accordingly when the batch size is changed, but actual measurements have found that this is not necessary\n", + "\n", + "Create a `yolov5_s-v61_fast_1xb12-40e_cat.py` config file in the `configs/yolov5` folder (we have provided this config for you to use directly) and copy the following into the config file.\n", + "\n", + "```python\n", + "# Inherit and overwrite part of the config based on this config\n", + "_base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'\n", + "\n", + "data_root = './data/cat/' # dataset root\n", + "class_name = ('cat', ) # dataset category name\n", + "num_classes = len(class_name) # dataset category number\n", + "# metainfo is a configuration that must be passed to the dataloader, otherwise it is invalid\n", + "# palette is a display color for category at visualization\n", + "# The palette length must be greater than or equal to the length of the classes\n", + "metainfo = dict(classes=class_name, palette=[(20, 220, 60)])\n", + "\n", + "# Adaptive anchor based on tools/analysis_tools/optimize_anchors.py\n", + "anchors = [\n", + " [(68, 69), (154, 91), (143, 162)], # P3/8\n", + " [(242, 160), (189, 287), (391, 207)], # P4/16\n", + " [(353, 337), (539, 341), (443, 432)] # P5/32\n", + "]\n", + "# Max training 40 epoch\n", + "max_epochs = 40\n", + "# bs = 12\n", + "train_batch_size_per_gpu = 12\n", + "# dataloader num workers\n", + "train_num_workers = 4\n", + "\n", + "# load COCO pre-trained weight\n", + "load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa\n", + "\n", + "model = dict(\n", + " # Fixed the weight of the entire backbone without training\n", + " backbone=dict(frozen_stages=4),\n", + " bbox_head=dict(\n", + " head_module=dict(num_classes=num_classes),\n", + " prior_generator=dict(base_sizes=anchors)\n", + " ))\n", + "\n", + "train_dataloader = dict(\n", + " batch_size=train_batch_size_per_gpu,\n", + " num_workers=train_num_workers,\n", + " dataset=dict(\n", + " data_root=data_root,\n", + " metainfo=metainfo,\n", + " # Dataset annotation file of json path\n", + " ann_file='annotations/trainval.json',\n", + " # Dataset prefix\n", + " data_prefix=dict(img='images/')))\n", + "\n", + "val_dataloader = dict(\n", + " dataset=dict(\n", + " metainfo=metainfo,\n", + " data_root=data_root,\n", + " ann_file='annotations/test.json',\n", + " data_prefix=dict(img='images/')))\n", + "\n", + "test_dataloader = val_dataloader\n", + "\n", + "_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu\n", + "\n", + "val_evaluator = dict(ann_file=data_root + 
'annotations/test.json')\n", + "test_evaluator = val_evaluator\n", + "\n", + "default_hooks = dict(\n", + " # Save weights every 10 epochs and a maximum of two weights can be saved.\n", + " # The best model is saved automatically during model evaluation\n", + " checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),\n", + " # The warmup_mim_iter parameter is critical.\n", + " # The default value is 1000 which is not suitable for cat datasets.\n", + " param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),\n", + " # The log printing interval is 5\n", + " logger=dict(type='LoggerHook', interval=5))\n", + "# The evaluation interval is 10\n", + "train_cfg = dict(max_epochs=max_epochs, val_interval=10)\n", + "```\n", + "\n", + "The above config is inherited from `yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py`. According to the characteristics of cat dataset updated `data_root`, `metainfo`, `train_dataloader`, `val_dataloader`, `num_classes` and other config.\n", + "\n", + "## Training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "TQ0h6sv_rJxq" + }, + "source": [ + "Run the above training command, `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat` folder will be automatically generated, the checkpoint file and the training config file will be saved in this folder. On a low-end 1660 GPU, the entire training process takes about eight minutes.\n", + "\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The performance on `test.json` is as follows:\n", + "\n", + "```text\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.631\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.909\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.747\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.631\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.627\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.703\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.703\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.703\n", + "```\n", + "\n", + "The above properties are printed via the COCO API, where -1 indicates that no object exists for the scale. According to the rules defined by COCO, the Cat dataset contains all large sized objects, and there are no small or medium-sized objects.\n", + "\n", + "### Some Notes\n", + "\n", + "Two key warnings are printed during training:\n", + "\n", + "- You are using `YOLOv5Head` with num_classes == 1. The loss_cls will be 0. This is a normal phenomenon.\n", + "- The model and loaded state dict do not match exactly\n", + "\n", + "Neither of these warnings will have any impact on performance. The first warning is because the `num_classes` currently trained is 1, the loss of the classification branch is always 0 according to the community of the YOLOv5 algorithm, which is a normal phenomenon. The second warning is because we are currently training in fine-tuning mode, we load the COCO pre-trained weights for 80 classes,\n", + "This will lead to the final Head module convolution channel number does not correspond, resulting in this part of the weight can not be loaded, which is also a normal phenomenon.\n", + "\n", + "### Training is resumed after the interruption\n", + "\n", + "If you stop training, you can add `--resume` to the end of the training command and the program will automatically resume training with the latest weights file from `work_dirs`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py --resume" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "3sJxvQoUrMhX" + }, + "source": [ + "### Save GPU memory strategy\n", + "\n", + "The above config requires about 3G RAM, so if you don't have enough, consider turning on mixed-precision training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py --amp" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "jVJdyHTxrQ9a" + }, + "source": [ + "### Training visualization\n", + "\n", + "MMYOLO currently supports local, TensorBoard, WandB and other back-end visualization. 
The default is to use local visualization, and you can switch to WandB and other real-time visualization of various indicators in the training process.\n", + "\n", + "#### 1 WandB\n", + "\n", + "WandB visualization need registered in website, and in the https://wandb.ai/settings for wandb API Keys.\n", + "\n", + "
\n", + "\"image\"/\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install wandb\n", + "# After running wandb login, enter the API Keys obtained above, and the login is successful.\n", + "!wandb login" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Yu0_4YYRrbyY" + }, + "source": [ + "Add the wandb config at the end of config file we just created: `configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])\n", + "```\n", + "\n", + "Running the training command and you will see the loss, learning rate, and coco/bbox_mAP visualizations in the link." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "f_DyzfDIzwMa" + }, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "#### 2 Tensorboard\n", + "\n", + "Install Tensorboard using the following command." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "gHkGlii3n29Q" + }, + "outputs": [], + "source": [ + "%pip install tensorboard" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "bE-nx9TY1P-M" + }, + "source": [ + "Add the `tensorboard` config at the end of config file we just created: `configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])\n", + "```\n", + "\n", + "After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/{timestamp}/vis_data`.\n", + "We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "g8fZgokho5CE" + }, + "outputs": [], + "source": [ + "!tensorboard --logdir=work_dirs/yolov5_s-v61_fast_1xb12-40e_cat" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "GUZ7MPoaro-o" + }, + "source": [ + "## Testing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VYmxtE0GunTB", + "outputId": "f440807c-1931-4810-b76d-617f73fde227" + }, + "outputs": [], + "source": [ + "!python tools/test.py configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --show-dir show_results" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "_cFocUqN0BCb" + }, + "source": [ + "Run the above test command, you can not only get the AP performance printed in the **Training** section, You can also automatically save the result images to the `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/{timestamp}/show_results` folder. Below is one of the result images, the left image is the actual annotation, and the right image is the inference result of the model.\n", + "\n", + "
\n", + "\"result_img\"/\n", + "
\n", + "\n", + "You can also visualize model inference results in a browser window if you use 'WandbVisBackend' or 'TensorboardVisBackend'.\n", + "\n", + "## Feature map visualization\n", + "\n", + "MMYOLO provides visualization scripts for feature map to analyze the current model training. Please refer to [Feature Map Visualization](../recommended_topics/visualization.md)\n", + "\n", + "Due to the bias of direct visualization of `test_pipeline`, we need modify the `test_pipeline` of `configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py`,\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='YOLOv5KeepRatioResize', scale=img_scale),\n", + " dict(\n", + " type='LetterResize',\n", + " scale=img_scale,\n", + " allow_scale_up=False,\n", + " pad_val=dict(img=114)),\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor', 'pad_param'))\n", + "]\n", + "```\n", + "\n", + "to the following config:\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='mmdet.Resize', scale=img_scale, keep_ratio=False), # modify the LetterResize to mmdet.Resize\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor'))\n", + "]\n", + "```\n", + "\n", + "Let's choose the `data/cat/images/IMG_20221020_112705.jpg` image as an example to visualize the output feature maps of YOLOv5 backbone and neck layers.\n", + "\n", + "**1. Visualize the three channels of YOLOv5 backbone**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/cat/images/IMG_20221020_112705.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --target-layers backbone \\\n", + " --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The result will be saved to the output folder in current path. Three output feature maps plotted in the above figure correspond to small, medium and large output feature maps. As the backbone of this training is not actually involved in training, it can be seen from the above figure that the big object cat is predicted on the small feature map, which is in line with the idea of hierarchical detection of object detection.\n", + "\n", + "**2. Visualize the three channels of YOLOv5 neck**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/cat/images/IMG_20221020_112705.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --target-layers neck \\\n", + " --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "As can be seen from the above figure, because neck is involved in training, and we also reset anchor, the three output feature maps are forced to simulate the same scale object, resulting in the three output maps of neck are similar, which destroys the original pre-training distribution of backbone. At the same time, it can also be seen that 40 epochs are not enough to train the above dataset, and the feature maps do not perform well.\n", + "\n", + "**3. Grad-Based CAM visualization**\n", + "\n", + "Based on the above feature map visualization, we can analyze Grad CAM at the feature layer of bbox level.\n", + "\n", + "Install `grad-cam` package:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install \"grad-cam\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(a) View Grad CAM of the minimum output feature map of the neck" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/boxam_vis_demo.py data/cat/images/IMG_20221020_112705.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --target-layer neck.out_layers[2]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "9v-dMkePvHMg" + }, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "(b) View Grad CAM of the medium output feature map of the neck" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "p9H9u0A-3KAD", + "outputId": "32ca5a56-052f-4930-f53c-41cc3a9dc619" + }, + "outputs": [], + "source": [ + "!python demo/boxam_vis_demo.py data/cat/images/IMG_20221020_112705.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --target-layer neck.out_layers[1]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(c) View Grad CAM of the maximum output feature map of the neck" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "MrKan1U43uUY", + "outputId": "690f8414-a76b-4fa6-e600-7cc874ce1914" + }, + "outputs": [], + "source": [ + "!python demo/boxam_vis_demo.py data/cat/images/IMG_20221020_112705.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --target-layer neck.out_layers[0]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "## EasyDeploy deployment\n", + "\n", + "Here we'll use MMYOLO's [EasyDeploy](../../../projects/easydeploy/) to demonstrate the transformation deployment and basic inference of model.\n", + "\n", + "First you need to follow EasyDeploy's [basic documentation](../../../projects/easydeploy/docs/model_convert.md) controls own equipment installed for each library.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install onnx\n", + "%pip install onnx-simplifier # Install if you want to use simplify\n", + "%pip install tensorrt # If you have GPU environment and need to output TensorRT model you need to continue execution" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once installed, you can use the following command to transform and deploy the trained model on the cat dataset with one click. The current ONNX version is 1.13.0 and TensorRT version is 8.5.3.1, so keep the `--opset` value of 11. The remaining parameters need to be adjusted according to the config used. Here we export the CPU version of ONNX with the `--backend` set to 1." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 534 + }, + "id": "YsRFEecU5C0w", + "outputId": "c26011d4-2836-4715-cd6b-68836294db33" + }, + "outputs": [], + "source": [ + "!python projects/easydeploy/tools/export.py \\\n", + "\t configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + "\t work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + "\t --work-dir work_dirs/yolov5_s-v61_fast_1xb12-40e_cat \\\n", + " --img-size 640 640 \\\n", + " --batch 1 \\\n", + " --device cpu \\\n", + " --simplify \\\n", + "\t --opset 11 \\\n", + "\t --backend 1 \\\n", + "\t --pre-topk 1000 \\\n", + "\t --keep-topk 100 \\\n", + "\t --iou-threshold 0.65 \\\n", + "\t --score-threshold 0.25\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "q1EY415x3Idx" + }, + "source": [ + "On success, you will get the converted ONNX model under `work-dir`, which is named `end2end.onnx` by default.\n", + "\n", + "Let's use `end2end.onnx` model to perform a basic image inference:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python projects/easydeploy/tools/image-demo.py \\\n", + " data/cat/images/IMG_20210728_205312.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/end2end.onnx \\\n", + " --device cpu" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "IrjiBa5YwDQM" + }, + "source": [ + "After successful inference, the result image will be generated in the `output` folder of the default MMYOLO root directory. If you want to see the result without saving it, you can add `--show` to the end of the above command. For convenience, the following is the generated result.\n", + "\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "Let's go on to convert the engine file for TensorRT, because TensorRT needs to be specific to the current environment and deployment version, so make sure to export the parameters, here we export the TensorRT8 file, the `--backend` is 2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d8zxczqiBLoB" + }, + "outputs": [], + "source": [ + "!python projects/easydeploy/tools/export.py \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \\\n", + " --work-dir work_dirs/yolov5_s-v61_fast_1xb12-40e_cat \\\n", + " --img-size 640 640 \\\n", + " --batch 1 \\\n", + " --device cuda:0 \\\n", + " --simplify \\\n", + " --opset 11 \\\n", + " --backend 2 \\\n", + " --pre-topk 1000 \\\n", + " --keep-topk 100 \\\n", + " --iou-threshold 0.65 \\\n", + " --score-threshold 0.25" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting `end2end.onnx` is the ONNX file for the TensorRT8 deployment, which we will use to complete the TensorRT engine transformation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "QFh8rIsX_kVw", + "outputId": "c5bd6929-03a8-400e-be1e-581f32b23f61" + }, + "outputs": [], + "source": [ + "!python projects/easydeploy/tools/build_engine.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/end2end.onnx \\\n", + " --img-size 640 640 \\\n", + " --device cuda:0" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Successful execution will generate the `end2end.engine` file under `work-dir`:\n", + "\n", + "```shell\n", + "work_dirs/yolov5_s-v61_fast_1xb12-40e_cat\n", + "├── 202302XX_XXXXXX\n", + "│ ├── 202302XX_XXXXXX.log\n", + "│ └── vis_data\n", + "│ ├── 202302XX_XXXXXX.json\n", + "│ ├── config.py\n", + "│ └── scalars.json\n", + "├── best_coco\n", + "│ └── bbox_mAP_epoch_40.pth\n", + "├── end2end.engine\n", + "├── end2end.onnx\n", + "├── epoch_30.pth\n", + "├── epoch_40.pth\n", + "├── last_checkpoint\n", + "└── yolov5_s-v61_fast_1xb12-40e_cat.py\n", + "```\n", + "\n", + "Let's continue use `image-demo.py` for image inference:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "rOqXEi-jAI7Y", + "outputId": "2a21aaaa-d4ba-498a-f985-2a6a2b8d348f" + }, + "outputs": [], + "source": [ + "!python projects/easydeploy/tools/image-demo.py \\\n", + " data/cat/images/IMG_20210728_205312.jpg \\\n", + " configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \\\n", + " work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/end2end.engine \\\n", + " --device cuda:0" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "ocHGUUEA_TjI" + }, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "This completes the transformation deployment of the trained model and checks the inference results. This is the end of the tutorial.\n", + "\n", + "If you encounter problems during training or testing, please check the [common troubleshooting steps](https://mmyolo.readthedocs.io/en/dev/recommended_topics/troubleshooting_steps.html) first and feel free to open an [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose) if you still can't solve it.\n" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [], + "toc_visible": true + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/models/YOLO-World/third_party/mmyolo/demo/boxam_vis_demo.py b/models/YOLO-World/third_party/mmyolo/demo/boxam_vis_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..278574f89fe5427cb5be7b9a7fd99f70de090bd4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/boxam_vis_demo.py @@ -0,0 +1,276 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""This script is in the experimental verification stage and cannot be +guaranteed to be completely correct. Currently Grad-based CAM and Grad-free CAM +are supported. + +The target detection task is different from the classification task. It not +only includes the AM map of the category, but also includes information such as +bbox and mask, so this script is named bboxam. +""" + +import argparse +import os.path +import warnings +from functools import partial + +import cv2 +import mmcv +from mmengine import Config, DictAction, MessageHub +from mmengine.utils import ProgressBar + +try: + from pytorch_grad_cam import AblationCAM, EigenCAM +except ImportError: + raise ImportError('Please run `pip install "grad-cam"` to install ' + 'pytorch_grad_cam package.') + +from mmyolo.utils.boxam_utils import (BoxAMDetectorVisualizer, + BoxAMDetectorWrapper, DetAblationLayer, + DetBoxScoreTarget, GradCAM, + GradCAMPlusPlus, reshape_transform) +from mmyolo.utils.misc import get_file_list + +GRAD_FREE_METHOD_MAP = { + 'ablationcam': AblationCAM, + 'eigencam': EigenCAM, + # 'scorecam': ScoreCAM, # consumes too much memory +} + +GRAD_BASED_METHOD_MAP = {'gradcam': GradCAM, 'gradcam++': GradCAMPlusPlus} + +ALL_SUPPORT_METHODS = list(GRAD_FREE_METHOD_MAP.keys() + | GRAD_BASED_METHOD_MAP.keys()) + +IGNORE_LOSS_PARAMS = { + 'yolov5': ['loss_obj'], + 'yolov6': ['loss_cls'], + 'yolox': ['loss_obj'], + 'rtmdet': ['loss_cls'], + 'yolov7': ['loss_obj'], + 'yolov8': ['loss_cls'], + 'ppyoloe': ['loss_cls'], +} + +# This parameter is required in some algorithms +# for calculating Loss +message_hub = MessageHub.get_current_instance() +message_hub.runtime_info['epoch'] = 0 + + +def parse_args(): + parser = argparse.ArgumentParser(description='Visualize Box AM') + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--method', + default='gradcam', + choices=ALL_SUPPORT_METHODS, + help='Type of method to use, supports ' + f'{", ".join(ALL_SUPPORT_METHODS)}.') + parser.add_argument( + '--target-layers', + default=['neck.out_layers[2]'], + nargs='+', + type=str, + help='The target layers to get Box AM, if not set, the tool will ' + 'specify the neck.out_layers[2]') + parser.add_argument( + '--out-dir', 
default='./output', help='Path to output file') + parser.add_argument( + '--show', action='store_true', help='Show the CAM results') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--topk', + type=int, + default=-1, + help='Select topk predict resutls to show. -1 are mean all.') + parser.add_argument( + '--max-shape', + nargs='+', + type=int, + default=-1, + help='max shapes. Its purpose is to save GPU memory. ' + 'The activation map is scaled and then evaluated. ' + 'If set to -1, it means no scaling.') + parser.add_argument( + '--preview-model', + default=False, + action='store_true', + help='To preview all the model layers') + parser.add_argument( + '--norm-in-bbox', action='store_true', help='Norm in bbox of am image') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + # Only used by AblationCAM + parser.add_argument( + '--batch-size', + type=int, + default=1, + help='batch of inference of AblationCAM') + parser.add_argument( + '--ratio-channels-to-ablate', + type=int, + default=0.5, + help='Making it much faster of AblationCAM. ' + 'The parameter controls how many channels should be ablated') + + args = parser.parse_args() + return args + + +def init_detector_and_visualizer(args, cfg): + max_shape = args.max_shape + if not isinstance(max_shape, list): + max_shape = [args.max_shape] + assert len(max_shape) == 1 or len(max_shape) == 2 + + model_wrapper = BoxAMDetectorWrapper( + cfg, args.checkpoint, args.score_thr, device=args.device) + + if args.preview_model: + print(model_wrapper.detector) + print('\n Please remove `--preview-model` to get the BoxAM.') + return None, None + + target_layers = [] + for target_layer in args.target_layers: + try: + target_layers.append( + eval(f'model_wrapper.detector.{target_layer}')) + except Exception as e: + print(model_wrapper.detector) + raise RuntimeError('layer does not exist', e) + + ablationcam_extra_params = { + 'batch_size': args.batch_size, + 'ablation_layer': DetAblationLayer(), + 'ratio_channels_to_ablate': args.ratio_channels_to_ablate + } + + if args.method in GRAD_BASED_METHOD_MAP: + method_class = GRAD_BASED_METHOD_MAP[args.method] + is_need_grad = True + else: + method_class = GRAD_FREE_METHOD_MAP[args.method] + is_need_grad = False + + boxam_detector_visualizer = BoxAMDetectorVisualizer( + method_class, + model_wrapper, + target_layers, + reshape_transform=partial( + reshape_transform, max_shape=max_shape, is_need_grad=is_need_grad), + is_need_grad=is_need_grad, + extra_params=ablationcam_extra_params) + return model_wrapper, boxam_detector_visualizer + + +def main(): + args = parse_args() + + # hard code + ignore_loss_params = None + for param_keys in IGNORE_LOSS_PARAMS: + if param_keys in args.config: + print(f'The algorithm currently used is {param_keys}') + ignore_loss_params = IGNORE_LOSS_PARAMS[param_keys] + break + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if not os.path.exists(args.out_dir) and not 
args.show: + os.mkdir(args.out_dir) + + model_wrapper, boxam_detector_visualizer = init_detector_and_visualizer( + args, cfg) + + # get file list + image_list, source_type = get_file_list(args.img) + + progress_bar = ProgressBar(len(image_list)) + + for image_path in image_list: + image = cv2.imread(image_path) + model_wrapper.set_input_data(image) + + # forward detection results + result = model_wrapper()[0] + + pred_instances = result.pred_instances + # Get candidate predict info with score threshold + pred_instances = pred_instances[pred_instances.scores > args.score_thr] + + if len(pred_instances) == 0: + warnings.warn('empty detection results! skip this') + continue + + if args.topk > 0: + pred_instances = pred_instances[:args.topk] + + targets = [ + DetBoxScoreTarget( + pred_instances, + device=args.device, + ignore_loss_params=ignore_loss_params) + ] + + if args.method in GRAD_BASED_METHOD_MAP: + model_wrapper.need_loss(True) + model_wrapper.set_input_data(image, pred_instances) + boxam_detector_visualizer.switch_activations_and_grads( + model_wrapper) + + # get box am image + grayscale_boxam = boxam_detector_visualizer(image, targets=targets) + + # draw cam on image + pred_instances = pred_instances.numpy() + image_with_bounding_boxes = boxam_detector_visualizer.show_am( + image, + pred_instances, + grayscale_boxam, + with_norm_in_bboxes=args.norm_in_bbox) + + if source_type['is_dir']: + filename = os.path.relpath(image_path, args.img).replace('/', '_') + else: + filename = os.path.basename(image_path) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + if out_file: + mmcv.imwrite(image_with_bounding_boxes, out_file) + else: + cv2.namedWindow(filename, 0) + cv2.imshow(filename, image_with_bounding_boxes) + cv2.waitKey(0) + + # switch + if args.method in GRAD_BASED_METHOD_MAP: + model_wrapper.need_loss(False) + boxam_detector_visualizer.switch_activations_and_grads( + model_wrapper) + + progress_bar.update() + + if not args.show: + print(f'All done!' + f'\nResults have been saved at {os.path.abspath(args.out_dir)}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/demo/deploy_demo.py b/models/YOLO-World/third_party/mmyolo/demo/deploy_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d08df47fc9740bc1d2ca837d5188f8b4eac267 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/deploy_demo.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Deploy demo for mmdeploy. + +This script help user to run mmdeploy demo after convert the +checkpoint to backends. 
+ +Usage: + python deploy_demo.py img \ + config \ + checkpoint \ + [--deploy-cfg DEPLOY_CFG] \ + [--device DEVICE] \ + [--out-dir OUT_DIR] \ + [--show] \ + [--score-thr SCORE_THR] + +Example: + python deploy_demo.py \ + ${MMYOLO_PATH}/data/cat/images \ + ./yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py \ + ./end2end.engine \ + --deploy-cfg ./detection_tensorrt-fp16_dynamic-192x192-960x960.py \ + --out-dir ${MMYOLO_PATH}/work_dirs/deploy_predict_out \ + --device cuda:0 \ + --score-thr 0.5 +""" +import argparse +import os + +import torch +from mmengine import ProgressBar + +from mmyolo.utils.misc import get_file_list + +try: + from mmdeploy.apis.utils import build_task_processor + from mmdeploy.utils import get_input_shape, load_config +except ImportError: + raise ImportError( + 'mmdeploy is not installed, please see ' + 'https://mmdeploy.readthedocs.io/en/1.x/01-how-to-build/build_from_source.html' # noqa + ) + + +def parse_args(): + parser = argparse.ArgumentParser(description='For mmdeploy predict') + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='model config root') + parser.add_argument('checkpoint', help='checkpoint backend model path') + parser.add_argument('--deploy-cfg', help='deploy config path') + parser.add_argument( + '--device', default='cuda:0', help='device used for conversion') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + args = parser.parse_args() + return args + + +# TODO Still need to refactor to not building dataset. +def main(): + args = parse_args() + + if not os.path.exists(args.out_dir) and not args.show: + os.mkdir(args.out_dir) + + # read deploy_cfg and config + deploy_cfg, model_cfg = load_config(args.deploy_cfg, args.config) + + # build task and backend model + task_processor = build_task_processor(model_cfg, deploy_cfg, args.device) + model = task_processor.build_backend_model([args.checkpoint]) + + # get model input shape + input_shape = get_input_shape(deploy_cfg) + + # get file list + files, source_type = get_file_list(args.img) + + # start detector inference + progress_bar = ProgressBar(len(files)) + for file in files: + # process input image + model_inputs, _ = task_processor.create_input(file, input_shape) + + # do model inference + with torch.no_grad(): + result = model.test_step(model_inputs) + + if source_type['is_dir']: + filename = os.path.relpath(file, args.img).replace('/', '_') + else: + filename = os.path.basename(file) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + # filter score + result = result[0] + result.pred_instances = result.pred_instances[ + result.pred_instances.scores > args.score_thr] + + # visualize results + task_processor.visualize( + image=file, + model=model, + result=result, + show_result=args.show, + window_name=os.path.basename(filename), + output_file=out_file) + + progress_bar.update() + + print('All done!') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/demo/featmap_vis_demo.py b/models/YOLO-World/third_party/mmyolo/demo/featmap_vis_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..892e73d616b0e629ddfcc276e8eb4ca289f5085b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/featmap_vis_demo.py @@ -0,0 +1,199 @@ +# 
Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +from typing import Sequence + +import mmcv +from mmdet.apis import inference_detector, init_detector +from mmengine import Config, DictAction +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar + +from mmyolo.registry import VISUALIZERS +from mmyolo.utils.misc import auto_arrange_images, get_file_list + + +def parse_args(): + parser = argparse.ArgumentParser(description='Visualize feature map') + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--target-layers', + default=['backbone'], + nargs='+', + type=str, + help='The target layers to get feature map, if not set, the tool will ' + 'specify the backbone') + parser.add_argument( + '--preview-model', + default=False, + action='store_true', + help='To preview all the model layers') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--show', action='store_true', help='Show the featmap results') + parser.add_argument( + '--channel-reduction', + default='select_max', + help='Reduce multiple channels to a single channel') + parser.add_argument( + '--topk', + type=int, + default=4, + help='Select topk channel to show by the sum of each channel') + parser.add_argument( + '--arrangement', + nargs='+', + type=int, + default=[2, 2], + help='The arrangement of featmap when channel_reduction is ' + 'not None and topk > 0') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +class ActivationsWrapper: + + def __init__(self, model, target_layers): + self.model = model + self.activations = [] + self.handles = [] + self.image = None + for target_layer in target_layers: + self.handles.append( + target_layer.register_forward_hook(self.save_activation)) + + def save_activation(self, module, input, output): + self.activations.append(output) + + def __call__(self, img_path): + self.activations = [] + results = inference_detector(self.model, img_path) + return results, self.activations + + def release(self): + for handle in self.handles: + handle.remove() + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + channel_reduction = args.channel_reduction + if channel_reduction == 'None': + channel_reduction = None + assert len(args.arrangement) == 2 + + model = init_detector(args.config, args.checkpoint, device=args.device) + + if not os.path.exists(args.out_dir) and not args.show: + os.mkdir(args.out_dir) + + if args.preview_model: + print(model) + print('\n This flag is only show model, if you want to continue, ' + 'please remove `--preview-model` to get the feature map.') + return + + target_layers = [] + for target_layer in args.target_layers: + try: + target_layers.append(eval(f'model.{target_layer}')) + except Exception as e: + print(model) + raise RuntimeError('layer does not exist', e) + + activations_wrapper = ActivationsWrapper(model, target_layers) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # get file list + image_list, source_type = get_file_list(args.img) + + progress_bar = ProgressBar(len(image_list)) + for image_path in image_list: + result, featmaps = activations_wrapper(image_path) + if not isinstance(featmaps, Sequence): + featmaps = [featmaps] + + flatten_featmaps = [] + for featmap in featmaps: + if isinstance(featmap, Sequence): + flatten_featmaps.extend(featmap) + else: + flatten_featmaps.append(featmap) + + img = mmcv.imread(image_path) + img = mmcv.imconvert(img, 'bgr', 'rgb') + + if source_type['is_dir']: + filename = os.path.relpath(image_path, args.img).replace('/', '_') + else: + filename = os.path.basename(image_path) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + # show the results + shown_imgs = [] + visualizer.add_datasample( + 'result', + img, + data_sample=result, + draw_gt=False, + show=False, + wait_time=0, + out_file=None, + pred_score_thr=args.score_thr) + drawn_img = visualizer.get_image() + + for featmap in flatten_featmaps: + shown_img = visualizer.draw_featmap( + featmap[0], + drawn_img, + channel_reduction=channel_reduction, + topk=args.topk, + arrangement=args.arrangement) + shown_imgs.append(shown_img) + + shown_imgs = auto_arrange_images(shown_imgs) + + progress_bar.update() + if out_file: + mmcv.imwrite(shown_imgs[..., ::-1], out_file) + + if args.show: + visualizer.show(shown_imgs) + + if not args.show: + print(f'All done!' 
+ f'\nResults have been saved at {os.path.abspath(args.out_dir)}') + + +# Please refer to the usage tutorial: +# https://github.com/open-mmlab/mmyolo/blob/main/docs/zh_cn/user_guides/visualization.md # noqa +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/demo/image_demo.py b/models/YOLO-World/third_party/mmyolo/demo/image_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2cfb2a03f7e8328dd068851433d69c9f4a0db5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/image_demo.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from argparse import ArgumentParser +from pathlib import Path + +import mmcv +from mmdet.apis import inference_detector, init_detector +from mmengine.config import Config, ConfigDict +from mmengine.logging import print_log +from mmengine.utils import ProgressBar, path + +from mmyolo.registry import VISUALIZERS +from mmyolo.utils import switch_to_deploy +from mmyolo.utils.labelme_utils import LabelmeFormat +from mmyolo.utils.misc import get_file_list, show_data_classes + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + parser.add_argument( + '--deploy', + action='store_true', + help='Switch model to deployment mode') + parser.add_argument( + '--tta', + action='store_true', + help='Whether to use test time augmentation') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--class-name', + nargs='+', + type=str, + help='Only Save those classes if set') + parser.add_argument( + '--to-labelme', + action='store_true', + help='Output labelme style label file') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.to_labelme and args.show: + raise RuntimeError('`--to-labelme` or `--show` only ' + 'can choose one at the same time.') + config = args.config + + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + + if args.tta: + assert 'tta_model' in config, 'Cannot find ``tta_model`` in config.' \ + " Can't use tta !" + assert 'tta_pipeline' in config, 'Cannot find ``tta_pipeline`` ' \ + "in config. Can't use tta !" + config.model = ConfigDict(**config.tta_model, module=config.model) + test_data_cfg = config.test_dataloader.dataset + while 'dataset' in test_data_cfg: + test_data_cfg = test_data_cfg['dataset'] + + # batch_shapes_cfg will force control the size of the output image, + # it is not compatible with tta. + if 'batch_shapes_cfg' in test_data_cfg: + test_data_cfg.batch_shapes_cfg = None + test_data_cfg.pipeline = config.tta_pipeline + + # TODO: TTA mode will error if cfg_options is not set. + # This is an mmdet issue and needs to be fixed later. 
+ # build the model from a config file and a checkpoint file + model = init_detector( + config, args.checkpoint, device=args.device, cfg_options={}) + + if args.deploy: + switch_to_deploy(model) + + if not args.show: + path.mkdir_or_exist(args.out_dir) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # get file list + files, source_type = get_file_list(args.img) + + # get model class name + dataset_classes = model.dataset_meta.get('classes') + + # ready for labelme format if it is needed + to_label_format = LabelmeFormat(classes=dataset_classes) + + # check class name + if args.class_name is not None: + for class_name in args.class_name: + if class_name in dataset_classes: + continue + show_data_classes(dataset_classes) + raise RuntimeError( + 'Expected args.class_name to be one of the list, ' + f'but got "{class_name}"') + + # start detector inference + progress_bar = ProgressBar(len(files)) + for file in files: + result = inference_detector(model, file) + + img = mmcv.imread(file) + img = mmcv.imconvert(img, 'bgr', 'rgb') + + if source_type['is_dir']: + filename = os.path.relpath(file, args.img).replace('/', '_') + else: + filename = os.path.basename(file) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + progress_bar.update() + + # Get candidate predict info with score threshold + pred_instances = result.pred_instances[ + result.pred_instances.scores > args.score_thr] + + if args.to_labelme: + # save result to labelme files + out_file = out_file.replace( + os.path.splitext(out_file)[-1], '.json') + to_label_format(pred_instances, result.metainfo, out_file, + args.class_name) + continue + + visualizer.add_datasample( + filename, + img, + data_sample=result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=out_file, + pred_score_thr=args.score_thr) + + if not args.show and not args.to_labelme: + print_log( + f'\nResults have been saved at {os.path.abspath(args.out_dir)}') + + elif args.to_labelme: + print_log('\nLabelme format label files ' + f'had all been saved in {args.out_dir}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/demo/large_image_demo.py b/models/YOLO-World/third_party/mmyolo/demo/large_image_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..bdbc3a56d0056c3965fac28c49e18b31355a2029 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/large_image_demo.py @@ -0,0 +1,294 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Perform MMYOLO inference on large images (as satellite imagery) as: + +```shell +wget -P checkpoint https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth # noqa: E501, E261. 
+ +python demo/large_image_demo.py \ + demo/large_image.jpg \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + checkpoint/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth +``` +""" + +import os +import random +from argparse import ArgumentParser +from pathlib import Path + +import mmcv +import numpy as np +from mmdet.apis import inference_detector, init_detector +from mmengine.config import Config, ConfigDict +from mmengine.logging import print_log +from mmengine.utils import ProgressBar + +try: + from sahi.slicing import slice_image +except ImportError: + raise ImportError('Please run "pip install -U sahi" ' + 'to install sahi first for large image inference.') + +from mmyolo.registry import VISUALIZERS +from mmyolo.utils import switch_to_deploy +from mmyolo.utils.large_image import merge_results_by_nms, shift_predictions +from mmyolo.utils.misc import get_file_list + + +def parse_args(): + parser = ArgumentParser( + description='Perform MMYOLO inference on large images.') + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + parser.add_argument( + '--deploy', + action='store_true', + help='Switch model to deployment mode') + parser.add_argument( + '--tta', + action='store_true', + help='Whether to use test time augmentation') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--patch-size', type=int, default=640, help='The size of patches') + parser.add_argument( + '--patch-overlap-ratio', + type=float, + default=0.25, + help='Ratio of overlap between two patches') + parser.add_argument( + '--merge-iou-thr', + type=float, + default=0.25, + help='IoU threshould for merging results') + parser.add_argument( + '--merge-nms-type', + type=str, + default='nms', + help='NMS type for merging results') + parser.add_argument( + '--batch-size', + type=int, + default=1, + help='Batch size, must greater than or equal to 1') + parser.add_argument( + '--debug', + action='store_true', + help='Export debug results before merging') + parser.add_argument( + '--save-patch', + action='store_true', + help='Save the results of each patch. ' + 'The `--debug` must be enabled.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + config = args.config + + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + + if args.tta: + assert 'tta_model' in config, 'Cannot find ``tta_model`` in config.' \ + " Can't use tta !" + assert 'tta_pipeline' in config, 'Cannot find ``tta_pipeline`` ' \ + "in config. Can't use tta !" + config.model = ConfigDict(**config.tta_model, module=config.model) + test_data_cfg = config.test_dataloader.dataset + while 'dataset' in test_data_cfg: + test_data_cfg = test_data_cfg['dataset'] + + # batch_shapes_cfg will force control the size of the output image, + # it is not compatible with tta. 
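+        # (Editor's note) `batch_shapes_cfg` (mmyolo's BatchShapePolicy) pads
+        # images of similar aspect ratio to one shared shape per batch for
+        # faster inference; the TTA pipeline controls the input scales itself,
+        # so the option is dropped below when TTA is enabled.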
+ if 'batch_shapes_cfg' in test_data_cfg: + test_data_cfg.batch_shapes_cfg = None + test_data_cfg.pipeline = config.tta_pipeline + + # TODO: TTA mode will error if cfg_options is not set. + # This is an mmdet issue and needs to be fixed later. + # build the model from a config file and a checkpoint file + model = init_detector( + config, args.checkpoint, device=args.device, cfg_options={}) + + if args.deploy: + switch_to_deploy(model) + + if not os.path.exists(args.out_dir) and not args.show: + os.mkdir(args.out_dir) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + visualizer.dataset_meta = model.dataset_meta + + # get file list + files, source_type = get_file_list(args.img) + + # start detector inference + print(f'Performing inference on {len(files)} images.... ' + 'This may take a while.') + progress_bar = ProgressBar(len(files)) + for file in files: + # read image + img = mmcv.imread(file) + + # arrange slices + height, width = img.shape[:2] + sliced_image_object = slice_image( + img, + slice_height=args.patch_size, + slice_width=args.patch_size, + auto_slice_resolution=False, + overlap_height_ratio=args.patch_overlap_ratio, + overlap_width_ratio=args.patch_overlap_ratio, + ) + + # perform sliced inference + slice_results = [] + start = 0 + while True: + # prepare batch slices + end = min(start + args.batch_size, len(sliced_image_object)) + images = [] + for sliced_image in sliced_image_object.images[start:end]: + images.append(sliced_image) + + # forward the model + slice_results.extend(inference_detector(model, images)) + + if end >= len(sliced_image_object): + break + start += args.batch_size + + if source_type['is_dir']: + filename = os.path.relpath(file, args.img).replace('/', '_') + else: + filename = os.path.basename(file) + + img = mmcv.imconvert(img, 'bgr', 'rgb') + out_file = None if args.show else os.path.join(args.out_dir, filename) + + # export debug images + if args.debug: + # export sliced image results + name, suffix = os.path.splitext(filename) + + shifted_instances = shift_predictions( + slice_results, + sliced_image_object.starting_pixels, + src_image_shape=(height, width)) + merged_result = slice_results[0].clone() + merged_result.pred_instances = shifted_instances + + debug_file_name = name + '_debug' + suffix + debug_out_file = None if args.show else os.path.join( + args.out_dir, debug_file_name) + visualizer.set_image(img.copy()) + + debug_grids = [] + for starting_point in sliced_image_object.starting_pixels: + start_point_x = starting_point[0] + start_point_y = starting_point[1] + end_point_x = start_point_x + args.patch_size + end_point_y = start_point_y + args.patch_size + debug_grids.append( + [start_point_x, start_point_y, end_point_x, end_point_y]) + debug_grids = np.array(debug_grids) + debug_grids[:, 0::2] = np.clip(debug_grids[:, 0::2], 1, + img.shape[1] - 1) + debug_grids[:, 1::2] = np.clip(debug_grids[:, 1::2], 1, + img.shape[0] - 1) + + palette = np.random.randint(0, 256, size=(len(debug_grids), 3)) + palette = [tuple(c) for c in palette] + line_styles = random.choices(['-', '-.', ':'], k=len(debug_grids)) + visualizer.draw_bboxes( + debug_grids, + edge_colors=palette, + alpha=1, + line_styles=line_styles) + visualizer.draw_bboxes( + debug_grids, face_colors=palette, alpha=0.15) + + visualizer.draw_texts( + list(range(len(debug_grids))), + debug_grids[:, :2] + 5, + colors='w') + + visualizer.add_datasample( + debug_file_name, + visualizer.get_image(), + data_sample=merged_result, + draw_gt=False, + show=args.show, + 
wait_time=0, + out_file=debug_out_file, + pred_score_thr=args.score_thr, + ) + + if args.save_patch: + debug_patch_out_dir = os.path.join(args.out_dir, + f'{name}_patch') + for i, slice_result in enumerate(slice_results): + patch_out_file = os.path.join( + debug_patch_out_dir, + f'{filename}_slice_{i}_result.jpg') + image = mmcv.imconvert(sliced_image_object.images[i], + 'bgr', 'rgb') + + visualizer.add_datasample( + 'patch_result', + image, + data_sample=slice_result, + draw_gt=False, + show=False, + wait_time=0, + out_file=patch_out_file, + pred_score_thr=args.score_thr, + ) + + image_result = merge_results_by_nms( + slice_results, + sliced_image_object.starting_pixels, + src_image_shape=(height, width), + nms_cfg={ + 'type': args.merge_nms_type, + 'iou_threshold': args.merge_iou_thr + }) + + visualizer.add_datasample( + filename, + img, + data_sample=image_result, + draw_gt=False, + show=args.show, + wait_time=0, + out_file=out_file, + pred_score_thr=args.score_thr, + ) + progress_bar.update() + + if not args.show or (args.debug and args.save_patch): + print_log( + f'\nResults have been saved at {os.path.abspath(args.out_dir)}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/demo/video_demo.py b/models/YOLO-World/third_party/mmyolo/demo/video_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..d8317a2c6c777eaa9cc6aab27e55bf53efe9e8fd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/demo/video_demo.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Perform MMYOLO inference on a video as: + +```shell +wget -P checkpoint https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth # noqa: E501, E261. 
+ +python demo/video_demo.py \ + demo/video_demo.mp4 \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + checkpoint/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth \ + --out demo_result.mp4 +``` +""" +import argparse + +import cv2 +import mmcv +from mmcv.transforms import Compose +from mmdet.apis import inference_detector, init_detector +from mmengine.utils import track_iter_progress + +from mmyolo.registry import VISUALIZERS + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMYOLO video demo') + parser.add_argument('video', help='Video file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument('--out', type=str, help='Output video file') + parser.add_argument('--show', action='store_true', help='Show video') + parser.add_argument( + '--wait-time', + type=float, + default=1, + help='The interval of show (s), 0 is block') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + assert args.out or args.show, \ + ('Please specify at least one operation (save/show the ' + 'video) with the argument "--out" or "--show"') + + # build the model from a config file and a checkpoint file + model = init_detector(args.config, args.checkpoint, device=args.device) + + # build test pipeline + model.cfg.test_dataloader.dataset.pipeline[ + 0].type = 'mmdet.LoadImageFromNDArray' + test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) + + # init visualizer + visualizer = VISUALIZERS.build(model.cfg.visualizer) + # the dataset_meta is loaded from the checkpoint and + # then pass to the model in init_detector + visualizer.dataset_meta = model.dataset_meta + + video_reader = mmcv.VideoReader(args.video) + video_writer = None + if args.out: + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + video_writer = cv2.VideoWriter( + args.out, fourcc, video_reader.fps, + (video_reader.width, video_reader.height)) + + for frame in track_iter_progress(video_reader): + result = inference_detector(model, frame, test_pipeline=test_pipeline) + visualizer.add_datasample( + name='video', + image=frame, + data_sample=result, + draw_gt=False, + show=False, + pred_score_thr=args.score_thr) + frame = visualizer.get_image() + + if args.show: + cv2.namedWindow('video', 0) + mmcv.imshow(frame, 'video', args.wait_time) + if args.out: + video_writer.write(frame) + + if video_writer: + video_writer.release() + cv2.destroyAllWindows() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/docker/Dockerfile b/models/YOLO-World/third_party/mmyolo/docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..fc65431a2940604118aaf747290442da78741365 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/docker/Dockerfile @@ -0,0 +1,36 @@ +ARG PYTORCH="1.9.0" +ARG CUDA="11.1" +ARG CUDNN="8" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" \ + TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \ + CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \ + FORCE_CUDA="1" + +RUN rm /etc/apt/sources.list.d/cuda.list \ + && rm /etc/apt/sources.list.d/nvidia-ml.list \ + && apt-key del 7fa2af80 \ + && apt-key adv --fetch-keys 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +# (Optional) +# RUN sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list && \ +# pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple + +RUN apt-get update \ + && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install MMEngine , MMCV and MMDet +RUN pip install --no-cache-dir openmim && \ + mim install --no-cache-dir "mmengine>=0.6.0" "mmcv>=2.0.0rc4,<2.1.0" "mmdet>=3.0.0,<4.0.0" + +# Install MMYOLO +RUN git clone https://github.com/open-mmlab/mmyolo.git /mmyolo && \ + cd /mmyolo && \ + mim install --no-cache-dir -e . + +WORKDIR /mmyolo diff --git a/models/YOLO-World/third_party/mmyolo/docker/Dockerfile_deployment b/models/YOLO-World/third_party/mmyolo/docker/Dockerfile_deployment new file mode 100644 index 0000000000000000000000000000000000000000..8ea1e380b0fab494047f9e2f94545f4e4b0b72e9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/docker/Dockerfile_deployment @@ -0,0 +1,65 @@ +FROM nvcr.io/nvidia/pytorch:22.04-py3 + +WORKDIR /openmmlab +ARG ONNXRUNTIME_VERSION=1.8.1 +ENV DEBIAN_FRONTEND=noninteractive \ + APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn \ + FORCE_CUDA="1" + +RUN apt-key del 7fa2af80 \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub + +# (Optional) +# RUN sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list \ +# && pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple + +RUN apt-get update \ + && apt-get install -y ffmpeg git libgl1-mesa-glx libopencv-dev \ + libsm6 libspdlog-dev libssl-dev ninja-build libxext6 libxrender-dev \ + libglib2.0-0 vim wget --no-install-recommends \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# get onnxruntime +RUN wget -q https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ + && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \ + && pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} \ + && pip install pycuda + + +# Install OPENMIM MMENGINE MMDET +RUN pip install --no-cache-dir openmim \ + && mim install --no-cache-dir "mmengine>=0.6.0" "mmdet>=3.0.0,<4.0.0" \ + && mim install --no-cache-dir opencv-python==4.5.5.64 opencv-python-headless==4.5.5.64 + +RUN git clone https://github.com/open-mmlab/mmcv.git -b 2.x mmcv \ + && cd mmcv \ + && mim install --no-cache-dir -r requirements/optional.txt \ + && MMCV_WITH_OPS=1 mim install --no-cache-dir -e . -v \ + && cd .. + +# Install MMYOLO +RUN git clone https://github.com/open-mmlab/mmyolo.git -b dev mmyolo \ + && cd mmyolo \ + && mim install --no-cache-dir -e . \ + && cd .. 
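+
+# (Editor's note, illustrative only) A typical way to build and enter this
+# image could be:
+#   docker build -t mmyolo-deploy -f docker/Dockerfile_deployment .
+#   docker run --gpus all -it mmyolo-deploy
+# The image tag and runtime flags are examples, not defined by this Dockerfile.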
+ +# Install MMDEPLOY +ENV ONNXRUNTIME_DIR=/openmmlab/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} \ + TENSORRT_DIR=/usr/lib/x86_64-linux-gnu \ + CUDNN_DIR=/usr/lib/x86_64-linux-gnu + +RUN git clone https://github.com/open-mmlab/mmdeploy -b dev-1.x mmdeploy \ + && cd mmdeploy \ + && git submodule update --init --recursive \ + && mkdir -p build \ + && cd build \ + && cmake -DMMDEPLOY_TARGET_BACKENDS="ort;trt" -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} -DTENSORRT_DIR=${TENSORRT_DIR} -DCUDNN_DIR=${CUDNN_DIR} .. \ + && make -j$(nproc) \ + && make install \ + && cd .. \ + && mim install --no-cache-dir -e . + +# Fix undefined symbol bug + RUN echo -e "\nexport LD_LIBRARY_PATH=${ONNXRUNTIME_DIR}/lib:${TENSORRT_DIR}/lib:${CUDNN_DIR}/lib64:${LD_LIBRARY_PATH}\nldconfig" >> /root/.bashrc diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13ce4e8a2b21ce22adffe37d77c5f374f6f0a008 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/__init__.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import mmdet +import mmengine +from mmengine.utils import digit_version + +from .version import __version__, version_info + +mmcv_minimum_version = '2.0.0rc4' +mmcv_maximum_version = '2.1.0' +mmcv_version = digit_version(mmcv.__version__) + +mmengine_minimum_version = '0.7.1' +mmengine_maximum_version = '1.0.0' +mmengine_version = digit_version(mmengine.__version__) + +mmdet_minimum_version = '3.0.0' +mmdet_maximum_version = '4.0.0' +mmdet_version = digit_version(mmdet.__version__) + + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.' + +assert (mmengine_version >= digit_version(mmengine_minimum_version) + and mmengine_version < digit_version(mmengine_maximum_version)), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_minimum_version}, ' \ + f'<{mmengine_maximum_version}.' + +assert (mmdet_version >= digit_version(mmdet_minimum_version) + and mmdet_version < digit_version(mmdet_maximum_version)), \ + f'MMDetection=={mmdet.__version__} is used but incompatible. ' \ + f'Please install mmdet>={mmdet_minimum_version}, ' \ + f'<{mmdet_maximum_version}.' + +__all__ = ['__version__', 'version_info', 'digit_version'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9db4390457119feaf13b1d2279c8c8bdf2abcf71 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
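+# (Editor's note) Importing this package runs the @DATASETS.register_module()
+# decorators in the modules below, so configs can refer to these datasets by
+# name, e.g. dict(type='YOLOv5CocoDataset', ...); the keyword arguments are
+# the usual mmdet dataset fields and are omitted here.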
+from .pose_coco import PoseCocoDataset +from .transforms import * # noqa: F401,F403 +from .utils import BatchShapePolicy, yolov5_collate +from .yolov5_coco import YOLOv5CocoDataset +from .yolov5_crowdhuman import YOLOv5CrowdHumanDataset +from .yolov5_dota import YOLOv5DOTADataset +from .yolov5_voc import YOLOv5VOCDataset + +__all__ = [ + 'YOLOv5CocoDataset', 'YOLOv5VOCDataset', 'BatchShapePolicy', + 'yolov5_collate', 'YOLOv5CrowdHumanDataset', 'YOLOv5DOTADataset', + 'PoseCocoDataset' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/pose_coco.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/pose_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b17f9836aea469f09679d01d605f3629771a1801 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/pose_coco.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any + +from mmengine.dataset import force_full_init + +try: + from mmpose.datasets import CocoDataset as MMPoseCocoDataset +except ImportError: + MMPoseCocoDataset = object + +from ..registry import DATASETS + + +@DATASETS.register_module() +class PoseCocoDataset(MMPoseCocoDataset): + + METAINFO: dict = dict(from_file='configs/_base_/pose/coco.py') + + def __init__(self, *args, **kwargs): + if MMPoseCocoDataset is object: + raise ImportError( + 'Please run "mim install -r requirements/mmpose.txt" ' + 'to install mmpose first for PoseCocoDataset.') + super().__init__(*args, **kwargs) + + @force_full_init + def prepare_data(self, idx) -> Any: + data_info = self.get_data_info(idx) + data_info['dataset'] = self + return self.pipeline(data_info) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7cdcf8625173e05ef884cf1afe17a9a1c992b6cd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .formatting import PackDetInputs +from .mix_img_transforms import Mosaic, Mosaic9, YOLOv5MixUp, YOLOXMixUp +from .transforms import (FilterAnnotations, LetterResize, LoadAnnotations, + Polygon2Mask, PPYOLOERandomCrop, PPYOLOERandomDistort, + RandomAffine, RandomFlip, RegularizeRotatedBox, + RemoveDataElement, Resize, YOLOv5CopyPaste, + YOLOv5HSVRandomAug, YOLOv5KeepRatioResize, + YOLOv5RandomAffine) + +__all__ = [ + 'YOLOv5KeepRatioResize', 'LetterResize', 'Mosaic', 'YOLOXMixUp', + 'YOLOv5MixUp', 'YOLOv5HSVRandomAug', 'LoadAnnotations', + 'YOLOv5RandomAffine', 'PPYOLOERandomDistort', 'PPYOLOERandomCrop', + 'Mosaic9', 'YOLOv5CopyPaste', 'RemoveDataElement', 'RegularizeRotatedBox', + 'Polygon2Mask', 'PackDetInputs', 'RandomAffine', 'RandomFlip', 'Resize', + 'FilterAnnotations' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/formatting.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..07eb0121eefdeece052695eeb46599a71a62efe3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/formatting.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
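+# (Editor's note, illustrative only) PackDetInputs is normally the last step of
+# a data pipeline, e.g. in a config:
+#   train_pipeline = [
+#       dict(type='LoadImageFromFile'),
+#       dict(type='LoadAnnotations', with_bbox=True),
+#       dict(type='PackDetInputs')
+#   ]
+# The loading transforms shown are common defaults, not requirements of this
+# module.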
+import numpy as np +from mmcv.transforms import to_tensor +from mmdet.datasets.transforms import PackDetInputs as MMDET_PackDetInputs +from mmdet.structures import DetDataSample +from mmdet.structures.bbox import BaseBoxes +from mmengine.structures import InstanceData, PixelData + +from mmyolo.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class PackDetInputs(MMDET_PackDetInputs): + """Pack the inputs data for the detection / semantic segmentation / + panoptic segmentation. + + Compared to mmdet, we just add the `gt_panoptic_seg` field and logic. + """ + mapping_table = { + 'gt_bboxes': 'bboxes', + 'gt_bboxes_labels': 'labels', + 'gt_masks': 'masks', + 'gt_keypoints': 'keypoints', + 'gt_keypoints_visible': 'keypoints_visible' + } + + def transform(self, results: dict) -> dict: + """Method to pack the input data. + Args: + results (dict): Result dict from the data pipeline. + Returns: + dict: + - 'inputs' (obj:`torch.Tensor`): The forward data of models. + - 'data_sample' (obj:`DetDataSample`): The annotation info of the + sample. + """ + packed_results = dict() + if 'img' in results: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + # To improve the computational speed by by 3-5 times, apply: + # If image is not contiguous, use + # `numpy.transpose()` followed by `numpy.ascontiguousarray()` + # If image is already contiguous, use + # `torch.permute()` followed by `torch.contiguous()` + # Refer to https://github.com/open-mmlab/mmdetection/pull/9533 + # for more details + if not img.flags.c_contiguous: + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + img = to_tensor(img) + else: + img = to_tensor(img).permute(2, 0, 1).contiguous() + + packed_results['inputs'] = img + + if 'gt_ignore_flags' in results: + valid_idx = np.where(results['gt_ignore_flags'] == 0)[0] + ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0] + if 'gt_keypoints' in results: + results['gt_keypoints_visible'] = results[ + 'gt_keypoints'].keypoints_visible + results['gt_keypoints'] = results['gt_keypoints'].keypoints + + data_sample = DetDataSample() + instance_data = InstanceData() + ignore_instance_data = InstanceData() + + for key in self.mapping_table.keys(): + if key not in results: + continue + if key == 'gt_masks' or isinstance(results[key], BaseBoxes): + if 'gt_ignore_flags' in results: + instance_data[ + self.mapping_table[key]] = results[key][valid_idx] + ignore_instance_data[ + self.mapping_table[key]] = results[key][ignore_idx] + else: + instance_data[self.mapping_table[key]] = results[key] + else: + if 'gt_ignore_flags' in results: + instance_data[self.mapping_table[key]] = to_tensor( + results[key][valid_idx]) + ignore_instance_data[self.mapping_table[key]] = to_tensor( + results[key][ignore_idx]) + else: + instance_data[self.mapping_table[key]] = to_tensor( + results[key]) + data_sample.gt_instances = instance_data + data_sample.ignored_instances = ignore_instance_data + + if 'gt_seg_map' in results: + gt_sem_seg_data = dict( + sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy())) + data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data) + + # In order to unify the support for the overlap mask annotations + # i.e. 
mask overlap annotations in (h,w) format, + # we use the gt_panoptic_seg field to unify the modeling + if 'gt_panoptic_seg' in results: + data_sample.gt_panoptic_seg = PixelData( + pan_seg=results['gt_panoptic_seg']) + + img_meta = {} + for key in self.meta_keys: + assert key in results, f'`{key}` is not found in `results`, ' \ + f'the valid keys are {list(results)}.' + img_meta[key] = results[key] + + data_sample.set_metainfo(img_meta) + packed_results['data_samples'] = data_sample + + return packed_results diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/keypoint_structure.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/keypoint_structure.py new file mode 100644 index 0000000000000000000000000000000000000000..7b8402be9950bc2a635f5269e7959719e8d87ac9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/keypoint_structure.py @@ -0,0 +1,248 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta +from copy import deepcopy +from typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union + +import numpy as np +import torch +from torch import Tensor + +DeviceType = Union[str, torch.device] +T = TypeVar('T') +IndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor, + torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray] + + +class Keypoints(metaclass=ABCMeta): + """The Keypoints class is for keypoints representation. + + Args: + keypoints (Tensor or np.ndarray): The keypoint data with shape of + (N, K, 2). + keypoints_visible (Tensor or np.ndarray): The visibility of keypoints + with shape of (N, K). + device (str or torch.device, Optional): device of keypoints. + Default to None. + clone (bool): Whether clone ``keypoints`` or not. Defaults to True. + flip_indices (list, Optional): The indices of keypoints when the + images is flipped. Defaults to None. + + Notes: + N: the number of instances. + K: the number of keypoints. + """ + + def __init__(self, + keypoints: Union[Tensor, np.ndarray], + keypoints_visible: Union[Tensor, np.ndarray], + device: Optional[DeviceType] = None, + clone: bool = True, + flip_indices: Optional[List] = None) -> None: + + assert len(keypoints_visible) == len(keypoints) + assert keypoints.ndim == 3 + assert keypoints_visible.ndim == 2 + + keypoints = torch.as_tensor(keypoints) + keypoints_visible = torch.as_tensor(keypoints_visible) + + if device is not None: + keypoints = keypoints.to(device=device) + keypoints_visible = keypoints_visible.to(device=device) + + if clone: + keypoints = keypoints.clone() + keypoints_visible = keypoints_visible.clone() + + self.keypoints = keypoints + self.keypoints_visible = keypoints_visible + self.flip_indices = flip_indices + + def flip_(self, + img_shape: Tuple[int, int], + direction: str = 'horizontal') -> None: + """Flip boxes & kpts horizontally in-place. + + Args: + img_shape (Tuple[int, int]): A tuple of image height and width. + direction (str): Flip direction, options are "horizontal", + "vertical" and "diagonal". Defaults to "horizontal" + """ + assert direction == 'horizontal' + self.keypoints[..., 0] = img_shape[1] - self.keypoints[..., 0] + self.keypoints = self.keypoints[:, self.flip_indices] + self.keypoints_visible = self.keypoints_visible[:, self.flip_indices] + + def translate_(self, distances: Tuple[float, float]) -> None: + """Translate boxes and keypoints in-place. + + Args: + distances (Tuple[float, float]): translate distances. 
The first + is horizontal distance and the second is vertical distance. + """ + assert len(distances) == 2 + distances = self.keypoints.new_tensor(distances).reshape(1, 1, 2) + self.keypoints = self.keypoints + distances + + def rescale_(self, scale_factor: Tuple[float, float]) -> None: + """Rescale boxes & keypoints w.r.t. rescale_factor in-place. + + Note: + Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes + w.r.t ``scale_facotr``. The difference is that ``resize_`` only + changes the width and the height of boxes, but ``rescale_`` also + rescales the box centers simultaneously. + + Args: + scale_factor (Tuple[float, float]): factors for scaling boxes. + The length should be 2. + """ + assert len(scale_factor) == 2 + + scale_factor = self.keypoints.new_tensor(scale_factor).reshape(1, 1, 2) + self.keypoints = self.keypoints * scale_factor + + def clip_(self, img_shape: Tuple[int, int]) -> None: + """Clip bounding boxes and set invisible keypoints outside the image + boundary in-place. + + Args: + img_shape (Tuple[int, int]): A tuple of image height and width. + """ + + kpt_outside = torch.logical_or( + torch.logical_or(self.keypoints[..., 0] < 0, + self.keypoints[..., 1] < 0), + torch.logical_or(self.keypoints[..., 0] > img_shape[1], + self.keypoints[..., 1] > img_shape[0])) + self.keypoints_visible[kpt_outside] *= 0 + + def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: + """Geometrically transform bounding boxes and keypoints in-place using + a homography matrix. + + Args: + homography_matrix (Tensor or np.ndarray): A 3x3 tensor or ndarray + representing the homography matrix for the transformation. + """ + keypoints = self.keypoints + if isinstance(homography_matrix, np.ndarray): + homography_matrix = keypoints.new_tensor(homography_matrix) + + # Convert keypoints to homogeneous coordinates + keypoints = torch.cat([ + self.keypoints, + self.keypoints.new_ones(*self.keypoints.shape[:-1], 1) + ], + dim=-1) + + # Transpose keypoints for matrix multiplication + keypoints_T = torch.transpose(keypoints, -1, 0).contiguous().flatten(1) + + # Apply homography matrix to corners and keypoints + keypoints_T = torch.matmul(homography_matrix, keypoints_T) + + # Transpose back to original shape + keypoints_T = keypoints_T.reshape(3, self.keypoints.shape[1], -1) + keypoints = torch.transpose(keypoints_T, -1, 0).contiguous() + + # Convert corners and keypoints back to non-homogeneous coordinates + keypoints = keypoints[..., :2] / keypoints[..., 2:3] + + # Convert corners back to bounding boxes and update object attributes + self.keypoints = keypoints + + @classmethod + def cat(cls: Type[T], kps_list: Sequence[T], dim: int = 0) -> T: + """Cancatenates an instance list into one single instance. Similar to + ``torch.cat``. + + Args: + box_list (Sequence[T]): A sequence of instances. + dim (int): The dimension over which the box and keypoint are + concatenated. Defaults to 0. + + Returns: + T: Concatenated instance. 
+ """ + assert isinstance(kps_list, Sequence) + if len(kps_list) == 0: + raise ValueError('kps_list should not be a empty list.') + + assert dim == 0 + assert all(isinstance(keypoints, cls) for keypoints in kps_list) + + th_kpt_list = torch.cat( + [keypoints.keypoints for keypoints in kps_list], dim=dim) + th_kpt_vis_list = torch.cat( + [keypoints.keypoints_visible for keypoints in kps_list], dim=dim) + flip_indices = kps_list[0].flip_indices + return cls( + th_kpt_list, + th_kpt_vis_list, + clone=False, + flip_indices=flip_indices) + + def __getitem__(self: T, index: IndexType) -> T: + """Rewrite getitem to protect the last dimension shape.""" + if isinstance(index, np.ndarray): + index = torch.as_tensor(index, device=self.device) + if isinstance(index, Tensor) and index.dtype == torch.bool: + assert index.dim() < self.keypoints.dim() - 1 + elif isinstance(index, tuple): + assert len(index) < self.keypoints.dim() - 1 + # `Ellipsis`(...) is commonly used in index like [None, ...]. + # When `Ellipsis` is in index, it must be the last item. + if Ellipsis in index: + assert index[-1] is Ellipsis + + keypoints = self.keypoints[index] + keypoints_visible = self.keypoints_visible[index] + if self.keypoints.dim() == 2: + keypoints = keypoints.reshape(1, -1, 2) + keypoints_visible = keypoints_visible.reshape(1, -1) + return type(self)( + keypoints, + keypoints_visible, + flip_indices=self.flip_indices, + clone=False) + + def __repr__(self) -> str: + """Return a strings that describes the object.""" + return self.__class__.__name__ + '(\n' + str(self.keypoints) + ')' + + @property + def num_keypoints(self) -> Tensor: + """Compute the number of visible keypoints for each object.""" + return self.keypoints_visible.sum(dim=1).int() + + def __deepcopy__(self, memo): + """Only clone the tensors when applying deepcopy.""" + cls = self.__class__ + other = cls.__new__(cls) + memo[id(self)] = other + other.keypoints = self.keypoints.clone() + other.keypoints_visible = self.keypoints_visible.clone() + other.flip_indices = deepcopy(self.flip_indices) + return other + + def clone(self: T) -> T: + """Reload ``clone`` for tensors.""" + return type(self)( + self.keypoints, + self.keypoints_visible, + flip_indices=self.flip_indices, + clone=True) + + def to(self: T, *args, **kwargs) -> T: + """Reload ``to`` for tensors.""" + return type(self)( + self.keypoints.to(*args, **kwargs), + self.keypoints_visible.to(*args, **kwargs), + flip_indices=self.flip_indices, + clone=False) + + @property + def device(self) -> torch.device: + """Reload ``device`` from self.tensor.""" + return self.keypoints.device diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/mix_img_transforms.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/mix_img_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..29e4a4057366374dbdd72fa106b5a3f7ac484d24 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/mix_img_transforms.py @@ -0,0 +1,1191 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import collections +import copy +from abc import ABCMeta, abstractmethod +from typing import Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +from mmcv.transforms import BaseTransform +from mmdet.structures.bbox import autocast_box_type +from mmengine.dataset import BaseDataset +from mmengine.dataset.base_dataset import Compose +from numpy import random + +from mmyolo.registry import TRANSFORMS + + +class BaseMixImageTransform(BaseTransform, metaclass=ABCMeta): + """A Base Transform of multiple images mixed. + + Suitable for training on multiple images mixed data augmentation like + mosaic and mixup. + + Cached mosaic transform will random select images from the cache + and combine them into one output image if use_cached is True. + + Args: + pre_transform(Sequence[str]): Sequence of transform object or + config dict to be composed. Defaults to None. + prob(float): The transformation probability. Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 40. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + pre_transform: Optional[Sequence[str]] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 40, + random_pop: bool = True, + max_refetch: int = 15): + + self.max_refetch = max_refetch + self.prob = prob + + self.use_cached = use_cached + self.max_cached_images = max_cached_images + self.random_pop = random_pop + self.results_cache = [] + + if pre_transform is None: + self.pre_transform = None + else: + self.pre_transform = Compose(pre_transform) + + @abstractmethod + def get_indexes(self, dataset: Union[BaseDataset, + list]) -> Union[list, int]: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list or int: indexes. + """ + pass + + @abstractmethod + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + pass + + @autocast_box_type() + def transform(self, results: dict) -> dict: + """Data augmentation function. + + The transform steps are as follows: + 1. Randomly generate index list of other images. + 2. Before Mosaic or MixUp need to go through the necessary + pre_transform, such as MixUp' pre_transform pipeline + include: 'LoadImageFromFile','LoadAnnotations', + 'Mosaic' and 'RandomAffine'. + 3. Use mix_img_transform function to implement specific + mix operations. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + + if random.uniform(0, 1) > self.prob: + return results + + if self.use_cached: + # Be careful: deep copying can be very time-consuming + # if results includes dataset. 
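+            # (Editor's note) In cached mode the current sample is appended to
+            # an in-memory cache and the partner images for Mosaic/MixUp are
+            # later drawn from that cache instead of the dataset; until the
+            # cache holds more than 4 samples the code below just returns the
+            # un-mixed result.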
+ dataset = results.pop('dataset', None) + self.results_cache.append(copy.deepcopy(results)) + if len(self.results_cache) > self.max_cached_images: + if self.random_pop: + index = random.randint(0, len(self.results_cache) - 1) + else: + index = 0 + self.results_cache.pop(index) + + if len(self.results_cache) <= 4: + return results + else: + assert 'dataset' in results + # Be careful: deep copying can be very time-consuming + # if results includes dataset. + dataset = results.pop('dataset', None) + + for _ in range(self.max_refetch): + # get index of one or three other images + if self.use_cached: + indexes = self.get_indexes(self.results_cache) + else: + indexes = self.get_indexes(dataset) + + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + + if self.use_cached: + mix_results = [ + copy.deepcopy(self.results_cache[i]) for i in indexes + ] + else: + # get images information will be used for Mosaic or MixUp + mix_results = [ + copy.deepcopy(dataset.get_data_info(index)) + for index in indexes + ] + + if self.pre_transform is not None: + for i, data in enumerate(mix_results): + # pre_transform may also require dataset + data.update({'dataset': dataset}) + # before Mosaic or MixUp need to go through + # the necessary pre_transform + _results = self.pre_transform(data) + _results.pop('dataset') + mix_results[i] = _results + + if None not in mix_results: + results['mix_results'] = mix_results + break + print('Repeated calculation') + else: + raise RuntimeError( + 'The loading pipeline of the original dataset' + ' always return None. Please check the correctness ' + 'of the dataset and its pipeline.') + + # Mosaic or MixUp + results = self.mix_img_transform(results) + + if 'mix_results' in results: + results.pop('mix_results') + results['dataset'] = dataset + + return results + + +@TRANSFORMS.register_module() +class Mosaic(BaseMixImageTransform): + """Mosaic augmentation. + + Given 4 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | | + | +-----------+ pad | + | | | | + | | image1 +-----------+ + | | | | + | | | image2 | + center_y |----+-+-----------+-----------+ + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + The mosaic transform steps are as follows: + + 1. Choose the mosaic center as the intersections of 4 images + 2. Get the left top image according to the index, and randomly + sample another 3 images from the custom dataset. + 3. Sub image will be cropped if image is larger than mosaic patch + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (width, height). + Defaults to (640, 640). + center_ratio_range (Sequence[float]): Center ratio range of mosaic + output. Defaults to (0.5, 1.5). + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. 
Defaults to True. + pad_val (int): Pad value. Defaults to 114. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 40. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + center_ratio_range: Tuple[float, float] = (0.5, 1.5), + bbox_clip_border: bool = True, + pad_val: float = 114.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 40, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ + f'got {prob}.' + if use_cached: + assert max_cached_images >= 4, 'The length of cache must >= 4, ' \ + f'but got {max_cached_images}.' + + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + + self.img_scale = img_scale + self.center_ratio_range = center_ratio_range + self.bbox_clip_border = bbox_clip_border + self.pad_val = pad_val + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> list: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list: indexes. + """ + indexes = [random.randint(0, len(dataset)) for _ in range(3)] + return indexes + + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. 
+ """ + assert 'mix_results' in results + mosaic_bboxes = [] + mosaic_bboxes_labels = [] + mosaic_ignore_flags = [] + mosaic_masks = [] + mosaic_kps = [] + with_mask = True if 'gt_masks' in results else False + with_kps = True if 'gt_keypoints' in results else False + # self.img_scale is wh format + img_scale_w, img_scale_h = self.img_scale + + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(img_scale_h * 2), int(img_scale_w * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full((int(img_scale_h * 2), int(img_scale_w * 2)), + self.pad_val, + dtype=results['img'].dtype) + + # mosaic center x, y + center_x = int(random.uniform(*self.center_ratio_range) * img_scale_w) + center_y = int(random.uniform(*self.center_ratio_range) * img_scale_h) + center_position = (center_x, center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + results_patch = results + else: + results_patch = results['mix_results'][i - 1] + + img_i = results_patch['img'] + h_i, w_i = img_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(img_scale_h / h_i, img_scale_w / w_i) + img_i = mmcv.imresize( + img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, img_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] + + # adjust coordinate + gt_bboxes_i = results_patch['gt_bboxes'] + gt_bboxes_labels_i = results_patch['gt_bboxes_labels'] + gt_ignore_flags_i = results_patch['gt_ignore_flags'] + + padw = x1_p - x1_c + padh = y1_p - y1_c + gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i]) + gt_bboxes_i.translate_([padw, padh]) + mosaic_bboxes.append(gt_bboxes_i) + mosaic_bboxes_labels.append(gt_bboxes_labels_i) + mosaic_ignore_flags.append(gt_ignore_flags_i) + if with_mask and results_patch.get('gt_masks', None) is not None: + gt_masks_i = results_patch['gt_masks'] + gt_masks_i = gt_masks_i.resize(img_i.shape[:2]) + gt_masks_i = gt_masks_i.translate( + out_shape=(int(self.img_scale[0] * 2), + int(self.img_scale[1] * 2)), + offset=padw, + direction='horizontal') + gt_masks_i = gt_masks_i.translate( + out_shape=(int(self.img_scale[0] * 2), + int(self.img_scale[1] * 2)), + offset=padh, + direction='vertical') + mosaic_masks.append(gt_masks_i) + if with_kps and results_patch.get('gt_keypoints', + None) is not None: + gt_kps_i = results_patch['gt_keypoints'] + gt_kps_i.rescale_([scale_ratio_i, scale_ratio_i]) + gt_kps_i.translate_([padw, padh]) + mosaic_kps.append(gt_kps_i) + + mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0) + mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0) + mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0) + + if self.bbox_clip_border: + mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w]) + if with_mask: + mosaic_masks = mosaic_masks[0].cat(mosaic_masks) + results['gt_masks'] = mosaic_masks + if with_kps: + mosaic_kps = mosaic_kps[0].cat(mosaic_kps, 0) + mosaic_kps.clip_([2 * img_scale_h, 2 * img_scale_w]) + results['gt_keypoints'] = mosaic_kps + else: + # remove outside bboxes + inside_inds = mosaic_bboxes.is_inside( + [2 * img_scale_h, 2 * img_scale_w]).numpy() + mosaic_bboxes = mosaic_bboxes[inside_inds] + mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds] + mosaic_ignore_flags = 
mosaic_ignore_flags[inside_inds] + if with_mask: + mosaic_masks = mosaic_masks[0].cat(mosaic_masks)[inside_inds] + results['gt_masks'] = mosaic_masks + if with_kps: + mosaic_kps = mosaic_kps[0].cat(mosaic_kps, 0) + mosaic_kps = mosaic_kps[inside_inds] + results['gt_keypoints'] = mosaic_kps + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['gt_bboxes'] = mosaic_bboxes + results['gt_bboxes_labels'] = mosaic_bboxes_labels + results['gt_ignore_flags'] = mosaic_ignore_flags + + return results + + def _mosaic_combine( + self, loc: str, center_position_xy: Sequence[float], + img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]: + """Calculate global coordinate of mosaic image and local coordinate of + cropped sub-image. + + Args: + loc (str): Index for the sub-image, loc in ('top_left', + 'top_right', 'bottom_left', 'bottom_right'). + center_position_xy (Sequence[float]): Mixing center for 4 images, + (x, y). + img_shape_wh (Sequence[int]): Width and height of sub-image + + Returns: + tuple[tuple[float]]: Corresponding coordinate of pasting and + cropping + - paste_coord (tuple): paste corner coordinate in mosaic image. + - crop_coord (tuple): crop corner coordinate in mosaic image. + """ + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + if loc == 'top_left': + # index0 to top left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + center_position_xy[0], \ + center_position_xy[1] + crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( + y2 - y1), img_shape_wh[0], img_shape_wh[1] + + elif loc == 'top_right': + # index1 to top right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[0] * 2), \ + center_position_xy[1] + crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( + img_shape_wh[0], x2 - x1), img_shape_wh[1] + + elif loc == 'bottom_left': + # index2 to bottom left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + center_position_xy[1], \ + center_position_xy[0], \ + min(self.img_scale[1] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( + y2 - y1, img_shape_wh[1]) + + else: + # index3 to bottom right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + center_position_xy[1], \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[0] * 2), \ + min(self.img_scale[1] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = 0, 0, min(img_shape_wh[0], + x2 - x1), min(y2 - y1, img_shape_wh[1]) + + paste_coord = x1, y1, x2, y2 + return paste_coord, crop_coord + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'center_ratio_range={self.center_ratio_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class Mosaic9(BaseMixImageTransform): + """Mosaic9 augmentation. + + Given 9 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. 
code:: text + + +-------------------------------+------------+ + | pad | pad | | + | +----------+ | | + | | +---------------+ top_right | + | | | top | image2 | + | | top_left | image1 | | + | | image8 o--------+------+--------+---+ + | | | | | | + +----+----------+ | right |pad| + | | center | image3 | | + | left | image0 +---------------+---| + | image7 | | | | + +---+-----------+---+--------+ | | + | | cropped | | bottom_right |pad| + | |bottom_left| | image4 | | + | | image6 | bottom | | | + +---|-----------+ image5 +---------------+---| + | pad | | pad | + +-----------+------------+-------------------+ + + The mosaic transform steps are as follows: + + 1. Get the center image according to the index, and randomly + sample another 8 images from the custom dataset. + 2. Randomly offset the image after Mosaic + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (width, height). + Defaults to (640, 640). + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + pad_val (int): Pad value. Defaults to 114. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 5 caches for each image suffices for + randomness. Defaults to 50. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + bbox_clip_border: bool = True, + pad_val: Union[float, int] = 114.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 50, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ + f'got {prob}.' + if use_cached: + assert max_cached_images >= 9, 'The length of cache must >= 9, ' \ + f'but got {max_cached_images}.' 
+ + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + + self.img_scale = img_scale + self.bbox_clip_border = bbox_clip_border + self.pad_val = pad_val + + # intermediate variables + self._current_img_shape = [0, 0] + self._center_img_shape = [0, 0] + self._previous_img_shape = [0, 0] + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> list: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list: indexes. + """ + indexes = [random.randint(0, len(dataset)) for _ in range(8)] + return indexes + + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + assert 'mix_results' in results + + mosaic_bboxes = [] + mosaic_bboxes_labels = [] + mosaic_ignore_flags = [] + + img_scale_w, img_scale_h = self.img_scale + + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(img_scale_h * 3), int(img_scale_w * 3), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full((int(img_scale_h * 3), int(img_scale_w * 3)), + self.pad_val, + dtype=results['img'].dtype) + + # index = 0 is mean original image + # len(results['mix_results']) = 8 + loc_strs = ('center', 'top', 'top_right', 'right', 'bottom_right', + 'bottom', 'bottom_left', 'left', 'top_left') + + results_all = [results, *results['mix_results']] + for index, results_patch in enumerate(results_all): + img_i = results_patch['img'] + # keep_ratio resize + img_i_h, img_i_w = img_i.shape[:2] + scale_ratio_i = min(img_scale_h / img_i_h, img_scale_w / img_i_w) + img_i = mmcv.imresize( + img_i, + (int(img_i_w * scale_ratio_i), int(img_i_h * scale_ratio_i))) + + paste_coord = self._mosaic_combine(loc_strs[index], + img_i.shape[:2]) + + padw, padh = paste_coord[:2] + x1, y1, x2, y2 = (max(x, 0) for x in paste_coord) + mosaic_img[y1:y2, x1:x2] = img_i[y1 - padh:, x1 - padw:] + + gt_bboxes_i = results_patch['gt_bboxes'] + gt_bboxes_labels_i = results_patch['gt_bboxes_labels'] + gt_ignore_flags_i = results_patch['gt_ignore_flags'] + gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i]) + gt_bboxes_i.translate_([padw, padh]) + + mosaic_bboxes.append(gt_bboxes_i) + mosaic_bboxes_labels.append(gt_bboxes_labels_i) + mosaic_ignore_flags.append(gt_ignore_flags_i) + + # Offset + offset_x = int(random.uniform(0, img_scale_w)) + offset_y = int(random.uniform(0, img_scale_h)) + mosaic_img = mosaic_img[offset_y:offset_y + 2 * img_scale_h, + offset_x:offset_x + 2 * img_scale_w] + + mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0) + mosaic_bboxes.translate_([-offset_x, -offset_y]) + mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0) + mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0) + + if self.bbox_clip_border: + mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w]) + else: + # remove outside bboxes + inside_inds = mosaic_bboxes.is_inside( + [2 * img_scale_h, 2 * img_scale_w]).numpy() + mosaic_bboxes = mosaic_bboxes[inside_inds] + mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds] + mosaic_ignore_flags = mosaic_ignore_flags[inside_inds] + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['gt_bboxes'] = mosaic_bboxes + results['gt_bboxes_labels'] = mosaic_bboxes_labels + results['gt_ignore_flags'] = mosaic_ignore_flags + 
return results + + def _mosaic_combine(self, loc: str, + img_shape_hw: Tuple[int, int]) -> Tuple[int, ...]: + """Calculate global coordinate of mosaic image. + + Args: + loc (str): Index for the sub-image. + img_shape_hw (Sequence[int]): Height and width of sub-image + + Returns: + paste_coord (tuple): paste corner coordinate in mosaic image. + """ + assert loc in ('center', 'top', 'top_right', 'right', 'bottom_right', + 'bottom', 'bottom_left', 'left', 'top_left') + + img_scale_w, img_scale_h = self.img_scale + + self._current_img_shape = img_shape_hw + current_img_h, current_img_w = self._current_img_shape + previous_img_h, previous_img_w = self._previous_img_shape + center_img_h, center_img_w = self._center_img_shape + + if loc == 'center': + self._center_img_shape = self._current_img_shape + # xmin, ymin, xmax, ymax + paste_coord = img_scale_w, \ + img_scale_h, \ + img_scale_w + current_img_w, \ + img_scale_h + current_img_h + elif loc == 'top': + paste_coord = img_scale_w, \ + img_scale_h - current_img_h, \ + img_scale_w + current_img_w, \ + img_scale_h + elif loc == 'top_right': + paste_coord = img_scale_w + previous_img_w, \ + img_scale_h - current_img_h, \ + img_scale_w + previous_img_w + current_img_w, \ + img_scale_h + elif loc == 'right': + paste_coord = img_scale_w + center_img_w, \ + img_scale_h, \ + img_scale_w + center_img_w + current_img_w, \ + img_scale_h + current_img_h + elif loc == 'bottom_right': + paste_coord = img_scale_w + center_img_w, \ + img_scale_h + previous_img_h, \ + img_scale_w + center_img_w + current_img_w, \ + img_scale_h + previous_img_h + current_img_h + elif loc == 'bottom': + paste_coord = img_scale_w + center_img_w - current_img_w, \ + img_scale_h + center_img_h, \ + img_scale_w + center_img_w, \ + img_scale_h + center_img_h + current_img_h + elif loc == 'bottom_left': + paste_coord = img_scale_w + center_img_w - \ + previous_img_w - current_img_w, \ + img_scale_h + center_img_h, \ + img_scale_w + center_img_w - previous_img_w, \ + img_scale_h + center_img_h + current_img_h + elif loc == 'left': + paste_coord = img_scale_w - current_img_w, \ + img_scale_h + center_img_h - current_img_h, \ + img_scale_w, \ + img_scale_h + center_img_h + elif loc == 'top_left': + paste_coord = img_scale_w - current_img_w, \ + img_scale_h + center_img_h - \ + previous_img_h - current_img_h, \ + img_scale_w, \ + img_scale_h + center_img_h - previous_img_h + + self._previous_img_shape = self._current_img_shape + # xmin, ymin, xmax, ymax + return paste_coord + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class YOLOv5MixUp(BaseMixImageTransform): + """MixUp data augmentation for YOLOv5. + + .. code:: text + + The mixup transform steps are as follows: + + 1. Another random image is picked by dataset. + 2. Randomly obtain the fusion ratio from the beta distribution, + then fuse the target + of the original image and mixup image through this ratio. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + + Args: + alpha (float): parameter of beta distribution to get mixup ratio. + Defaults to 32. 
+ beta (float): parameter of beta distribution to get mixup ratio. + Defaults to 32. + pre_transform (Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 20. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of iterations. If the number of + iterations is greater than `max_refetch`, but gt_bbox is still + empty, then the iteration is terminated. Defaults to 15. + """ + + def __init__(self, + alpha: float = 32.0, + beta: float = 32.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 20, + random_pop: bool = True, + max_refetch: int = 15): + if use_cached: + assert max_cached_images >= 2, 'The length of cache must >= 2, ' \ + f'but got {max_cached_images}.' + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + self.alpha = alpha + self.beta = beta + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> int: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + int: indexes. + """ + return random.randint(0, len(dataset)) + + def mix_img_transform(self, results: dict) -> dict: + """YOLOv5 MixUp transform function. + + Args: + results (dict): Result dict + + Returns: + results (dict): Updated result dict. + """ + assert 'mix_results' in results + + retrieve_results = results['mix_results'][0] + retrieve_img = retrieve_results['img'] + ori_img = results['img'] + assert ori_img.shape == retrieve_img.shape + + # Randomly obtain the fusion ratio from the beta distribution, + # which is around 0.5 + ratio = np.random.beta(self.alpha, self.beta) + mixup_img = (ori_img * ratio + retrieve_img * (1 - ratio)) + + retrieve_gt_bboxes = retrieve_results['gt_bboxes'] + retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] + retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] + + mixup_gt_bboxes = retrieve_gt_bboxes.cat( + (results['gt_bboxes'], retrieve_gt_bboxes), dim=0) + mixup_gt_bboxes_labels = np.concatenate( + (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) + mixup_gt_ignore_flags = np.concatenate( + (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) + if 'gt_masks' in results: + assert 'gt_masks' in retrieve_results + mixup_gt_masks = results['gt_masks'].cat( + [results['gt_masks'], retrieve_results['gt_masks']]) + results['gt_masks'] = mixup_gt_masks + + results['img'] = mixup_img.astype(np.uint8) + results['img_shape'] = mixup_img.shape + results['gt_bboxes'] = mixup_gt_bboxes + results['gt_bboxes_labels'] = mixup_gt_bboxes_labels + results['gt_ignore_flags'] = mixup_gt_ignore_flags + + return results + + +@TRANSFORMS.register_module() +class YOLOXMixUp(BaseMixImageTransform): + """MixUp data augmentation for YOLOX. + + .. 
code:: text + + mixup transform + +---------------+--------------+ + | mixup image | | + | +--------|--------+ | + | | | | | + +---------------+ | | + | | | | + | | image | | + | | | | + | | | | + | +-----------------+ | + | pad | + +------------------------------+ + + The mixup transform steps are as follows: + + 1. Another random image is picked by dataset and embedded in + the top left patch(after padding and resizing) + 2. The target of mixup transform is the weighted average of mixup + image and origin image. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + + Args: + img_scale (Sequence[int]): Image output size after mixup pipeline. + The shape order should be (width, height). Defaults to (640, 640). + ratio_range (Sequence[float]): Scale ratio of mixup image. + Defaults to (0.5, 1.5). + flip_ratio (float): Horizontal flip ratio of mixup image. + Defaults to 0.5. + pad_val (int): Pad value. Defaults to 114. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 20. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of iterations. If the number of + iterations is greater than `max_refetch`, but gt_bbox is still + empty, then the iteration is terminated. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + ratio_range: Tuple[float, float] = (0.5, 1.5), + flip_ratio: float = 0.5, + pad_val: float = 114.0, + bbox_clip_border: bool = True, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 20, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + if use_cached: + assert max_cached_images >= 2, 'The length of cache must >= 2, ' \ + f'but got {max_cached_images}.' + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + self.img_scale = img_scale + self.ratio_range = ratio_range + self.flip_ratio = flip_ratio + self.pad_val = pad_val + self.bbox_clip_border = bbox_clip_border + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> int: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + int: indexes. + """ + return random.randint(0, len(dataset)) + + def mix_img_transform(self, results: dict) -> dict: + """YOLOX MixUp transform function. 
+ + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + assert 'mix_results' in results + assert len( + results['mix_results']) == 1, 'MixUp only support 2 images now !' + + if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: + # empty bbox + return results + + retrieve_results = results['mix_results'][0] + retrieve_img = retrieve_results['img'] + + jit_factor = random.uniform(*self.ratio_range) + is_filp = random.uniform(0, 1) > self.flip_ratio + + if len(retrieve_img.shape) == 3: + out_img = np.ones((self.img_scale[1], self.img_scale[0], 3), + dtype=retrieve_img.dtype) * self.pad_val + else: + out_img = np.ones( + self.img_scale[::-1], dtype=retrieve_img.dtype) * self.pad_val + + # 1. keep_ratio resize + scale_ratio = min(self.img_scale[1] / retrieve_img.shape[0], + self.img_scale[0] / retrieve_img.shape[1]) + retrieve_img = mmcv.imresize( + retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), + int(retrieve_img.shape[0] * scale_ratio))) + + # 2. paste + out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img + + # 3. scale jit + scale_ratio *= jit_factor + out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), + int(out_img.shape[0] * jit_factor))) + + # 4. flip + if is_filp: + out_img = out_img[:, ::-1, :] + + # 5. random crop + ori_img = results['img'] + origin_h, origin_w = out_img.shape[:2] + target_h, target_w = ori_img.shape[:2] + padded_img = np.ones((max(origin_h, target_h), max( + origin_w, target_w), 3)) * self.pad_val + padded_img = padded_img.astype(np.uint8) + padded_img[:origin_h, :origin_w] = out_img + + x_offset, y_offset = 0, 0 + if padded_img.shape[0] > target_h: + y_offset = random.randint(0, padded_img.shape[0] - target_h) + if padded_img.shape[1] > target_w: + x_offset = random.randint(0, padded_img.shape[1] - target_w) + padded_cropped_img = padded_img[y_offset:y_offset + target_h, + x_offset:x_offset + target_w] + + # 6. adjust bbox + retrieve_gt_bboxes = retrieve_results['gt_bboxes'] + retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio]) + if self.bbox_clip_border: + retrieve_gt_bboxes.clip_([origin_h, origin_w]) + + if is_filp: + retrieve_gt_bboxes.flip_([origin_h, origin_w], + direction='horizontal') + + # 7. filter + cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone() + cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset]) + if self.bbox_clip_border: + cp_retrieve_gt_bboxes.clip_([target_h, target_w]) + + # 8. 
mix up + mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img + + retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] + retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] + + mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat( + (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0) + mixup_gt_bboxes_labels = np.concatenate( + (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) + mixup_gt_ignore_flags = np.concatenate( + (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) + + if not self.bbox_clip_border: + # remove outside bbox + inside_inds = mixup_gt_bboxes.is_inside([target_h, + target_w]).numpy() + mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] + mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds] + mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds] + + if 'gt_keypoints' in results: + # adjust kps + retrieve_gt_keypoints = retrieve_results['gt_keypoints'] + retrieve_gt_keypoints.rescale_([scale_ratio, scale_ratio]) + if self.bbox_clip_border: + retrieve_gt_keypoints.clip_([origin_h, origin_w]) + + if is_filp: + retrieve_gt_keypoints.flip_([origin_h, origin_w], + direction='horizontal') + + # filter + cp_retrieve_gt_keypoints = retrieve_gt_keypoints.clone() + cp_retrieve_gt_keypoints.translate_([-x_offset, -y_offset]) + if self.bbox_clip_border: + cp_retrieve_gt_keypoints.clip_([target_h, target_w]) + + # mixup + mixup_gt_keypoints = cp_retrieve_gt_keypoints.cat( + (results['gt_keypoints'], cp_retrieve_gt_keypoints), dim=0) + if not self.bbox_clip_border: + # remove outside bbox + mixup_gt_keypoints = mixup_gt_keypoints[inside_inds] + results['gt_keypoints'] = mixup_gt_keypoints + + results['img'] = mixup_img.astype(np.uint8) + results['img_shape'] = mixup_img.shape + results['gt_bboxes'] = mixup_gt_bboxes + results['gt_bboxes_labels'] = mixup_gt_bboxes_labels + results['gt_ignore_flags'] = mixup_gt_ignore_flags + + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'flip_ratio={self.flip_ratio}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'max_refetch={self.max_refetch}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/transforms.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..8060e9c727b95ba4cfcef865385f9e40491e26da --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/transforms/transforms.py @@ -0,0 +1,2102 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
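+
+# A rough usage sketch with hypothetical pipeline values: the transforms
+# defined in this module are registered in mmyolo's TRANSFORMS registry and
+# are normally composed from a dataset pipeline config rather than being
+# instantiated directly, for example:
+#
+#   train_pipeline = [
+#       dict(type='LoadImageFromFile'),
+#       dict(type='LoadAnnotations', with_bbox=True),
+#       dict(type='YOLOv5KeepRatioResize', scale=(640, 640)),
+#       dict(type='LetterResize', scale=(640, 640), allow_scale_up=False),
+#       dict(type='YOLOv5HSVRandomAug'),
+#       dict(type='mmdet.PackDetInputs'),
+#   ]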
+import math +from copy import deepcopy +from typing import List, Sequence, Tuple, Union + +import cv2 +import mmcv +import numpy as np +import torch +from mmcv.image.geometric import _scale_size +from mmcv.transforms import BaseTransform, Compose +from mmcv.transforms.utils import cache_randomness +from mmdet.datasets.transforms import FilterAnnotations as FilterDetAnnotations +from mmdet.datasets.transforms import LoadAnnotations as MMDET_LoadAnnotations +from mmdet.datasets.transforms import RandomAffine as MMDET_RandomAffine +from mmdet.datasets.transforms import RandomFlip as MMDET_RandomFlip +from mmdet.datasets.transforms import Resize as MMDET_Resize +from mmdet.structures.bbox import (HorizontalBoxes, autocast_box_type, + get_box_type) +from mmdet.structures.mask import PolygonMasks, polygon_to_bitmap +from numpy import random + +from mmyolo.registry import TRANSFORMS +from .keypoint_structure import Keypoints + +# TODO: Waiting for MMCV support +TRANSFORMS.register_module(module=Compose, force=True) + + +@TRANSFORMS.register_module() +class YOLOv5KeepRatioResize(MMDET_Resize): + """Resize images & bbox(if existed). + + This transform resizes the input image according to ``scale``. + Bboxes (if existed) are then resized with the same scale factor. + + Required Keys: + + - img (np.uint8) + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + + Modified Keys: + + - img (np.uint8) + - img_shape (tuple) + - gt_bboxes (optional) + - scale (float) + + Added Keys: + + - scale_factor (np.float32) + + Args: + scale (Union[int, Tuple[int, int]]): Images scales for resizing. + """ + + def __init__(self, + scale: Union[int, Tuple[int, int]], + keep_ratio: bool = True, + **kwargs): + assert keep_ratio is True + super().__init__(scale=scale, keep_ratio=True, **kwargs) + + @staticmethod + def _get_rescale_ratio(old_size: Tuple[int, int], + scale: Union[float, Tuple[int]]) -> float: + """Calculate the ratio for rescaling. + + Args: + old_size (tuple[int]): The old size (w, h) of image. + scale (float | tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by + this factor, else if it is a tuple of 2 integers, then + the image will be rescaled as large as possible within + the scale. + + Returns: + float: The resize ratio. + """ + w, h = old_size + if isinstance(scale, (float, int)): + if scale <= 0: + raise ValueError(f'Invalid scale {scale}, must be positive.') + scale_factor = scale + elif isinstance(scale, tuple): + max_long_edge = max(scale) + max_short_edge = min(scale) + scale_factor = min(max_long_edge / max(h, w), + max_short_edge / min(h, w)) + else: + raise TypeError('Scale must be a number or tuple of int, ' + f'but got {type(scale)}') + + return scale_factor + + def _resize_img(self, results: dict): + """Resize images with ``results['scale']``.""" + assert self.keep_ratio is True + + if results.get('img', None) is not None: + image = results['img'] + original_h, original_w = image.shape[:2] + ratio = self._get_rescale_ratio((original_h, original_w), + self.scale) + + if ratio != 1: + # resize image according to the shape + # NOTE: We are currently testing on COCO that modifying + # this code will not affect the results. + # If you find that it has an effect on your results, + # please feel free to contact us. 
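+                # Worked example with hypothetical sizes: for an input of
+                # height 720 and width 1280 with scale=(640, 640),
+                # _get_rescale_ratio returns min(640 / 1280, 640 / 720) = 0.5,
+                # so the image below is resized to 640 x 360 (w x h) using
+                # 'area' interpolation; upscaling (ratio > 1) uses 'bilinear'.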
+ image = mmcv.imresize( + img=image, + size=(int(original_w * ratio), int(original_h * ratio)), + interpolation='area' if ratio < 1 else 'bilinear', + backend=self.backend) + + resized_h, resized_w = image.shape[:2] + scale_ratio_h = resized_h / original_h + scale_ratio_w = resized_w / original_w + scale_factor = (scale_ratio_w, scale_ratio_h) + + results['img'] = image + results['img_shape'] = image.shape[:2] + results['scale_factor'] = scale_factor + + +@TRANSFORMS.register_module() +class LetterResize(MMDET_Resize): + """Resize and pad image while meeting stride-multiple constraints. + + Required Keys: + + - img (np.uint8) + - batch_shape (np.int64) (optional) + + Modified Keys: + + - img (np.uint8) + - img_shape (tuple) + - gt_bboxes (optional) + + Added Keys: + - pad_param (np.float32) + + Args: + scale (Union[int, Tuple[int, int]]): Images scales for resizing. + pad_val (dict): Padding value. Defaults to dict(img=0, seg=255). + use_mini_pad (bool): Whether using minimum rectangle padding. + Defaults to True + stretch_only (bool): Whether stretch to the specified size directly. + Defaults to False + allow_scale_up (bool): Allow scale up when ratio > 1. Defaults to True + half_pad_param (bool): If set to True, left and right pad_param will + be given by dividing padding_h by 2. If set to False, pad_param is + in int format. We recommend setting this to False for object + detection tasks, and True for instance segmentation tasks. + Default to False. + """ + + def __init__(self, + scale: Union[int, Tuple[int, int]], + pad_val: dict = dict(img=0, mask=0, seg=255), + use_mini_pad: bool = False, + stretch_only: bool = False, + allow_scale_up: bool = True, + half_pad_param: bool = False, + **kwargs): + super().__init__(scale=scale, keep_ratio=True, **kwargs) + + self.pad_val = pad_val + if isinstance(pad_val, (int, float)): + pad_val = dict(img=pad_val, seg=255) + assert isinstance( + pad_val, dict), f'pad_val must be dict, but got {type(pad_val)}' + + self.use_mini_pad = use_mini_pad + self.stretch_only = stretch_only + self.allow_scale_up = allow_scale_up + self.half_pad_param = half_pad_param + + def _resize_img(self, results: dict): + """Resize images with ``results['scale']``.""" + image = results.get('img', None) + if image is None: + return + + # Use batch_shape if a batch_shape policy is configured + if 'batch_shape' in results: + scale = tuple(results['batch_shape']) # hw + else: + scale = self.scale[::-1] # wh -> hw + + image_shape = image.shape[:2] # height, width + + # Scale ratio (new / old) + ratio = min(scale[0] / image_shape[0], scale[1] / image_shape[1]) + + # only scale down, do not scale up (for better test mAP) + if not self.allow_scale_up: + ratio = min(ratio, 1.0) + + ratio = [ratio, ratio] # float -> (float, float) for (height, width) + + # compute the best size of the image + no_pad_shape = (int(round(image_shape[0] * ratio[0])), + int(round(image_shape[1] * ratio[1]))) + + # padding height & width + padding_h, padding_w = [ + scale[0] - no_pad_shape[0], scale[1] - no_pad_shape[1] + ] + if self.use_mini_pad: + # minimum rectangle padding + padding_w, padding_h = np.mod(padding_w, 32), np.mod(padding_h, 32) + + elif self.stretch_only: + # stretch to the specified size directly + padding_h, padding_w = 0.0, 0.0 + no_pad_shape = (scale[0], scale[1]) + ratio = [scale[0] / image_shape[0], + scale[1] / image_shape[1]] # height, width ratios + + if image_shape != no_pad_shape: + # compare with no resize and padding size + image = mmcv.imresize( + image, (no_pad_shape[1], 
no_pad_shape[0]), + interpolation=self.interpolation, + backend=self.backend) + + scale_factor = (no_pad_shape[1] / image_shape[1], + no_pad_shape[0] / image_shape[0]) + + if 'scale_factor' in results: + results['scale_factor_origin'] = results['scale_factor'] + results['scale_factor'] = scale_factor + + # padding + top_padding, left_padding = int(round(padding_h // 2 - 0.1)), int( + round(padding_w // 2 - 0.1)) + bottom_padding = padding_h - top_padding + right_padding = padding_w - left_padding + + padding_list = [ + top_padding, bottom_padding, left_padding, right_padding + ] + if top_padding != 0 or bottom_padding != 0 or \ + left_padding != 0 or right_padding != 0: + + pad_val = self.pad_val.get('img', 0) + if isinstance(pad_val, int) and image.ndim == 3: + pad_val = tuple(pad_val for _ in range(image.shape[2])) + + image = mmcv.impad( + img=image, + padding=(padding_list[2], padding_list[0], padding_list[3], + padding_list[1]), + pad_val=pad_val, + padding_mode='constant') + + results['img'] = image + results['img_shape'] = image.shape + if 'pad_param' in results: + results['pad_param_origin'] = results['pad_param'] * \ + np.repeat(ratio, 2) + + if self.half_pad_param: + results['pad_param'] = np.array( + [padding_h / 2, padding_h / 2, padding_w / 2, padding_w / 2], + dtype=np.float32) + else: + # We found in object detection, using padding list with + # int type can get higher mAP. + results['pad_param'] = np.array(padding_list, dtype=np.float32) + + def _resize_masks(self, results: dict): + """Resize masks with ``results['scale']``""" + if results.get('gt_masks', None) is None: + return + + gt_masks = results['gt_masks'] + assert isinstance( + gt_masks, PolygonMasks + ), f'Only supports PolygonMasks, but got {type(gt_masks)}' + + # resize the gt_masks + gt_mask_h = results['gt_masks'].height * results['scale_factor'][1] + gt_mask_w = results['gt_masks'].width * results['scale_factor'][0] + gt_masks = results['gt_masks'].resize( + (int(round(gt_mask_h)), int(round(gt_mask_w)))) + + top_padding, _, left_padding, _ = results['pad_param'] + if int(left_padding) != 0: + gt_masks = gt_masks.translate( + out_shape=results['img_shape'][:2], + offset=int(left_padding), + direction='horizontal') + if int(top_padding) != 0: + gt_masks = gt_masks.translate( + out_shape=results['img_shape'][:2], + offset=int(top_padding), + direction='vertical') + results['gt_masks'] = gt_masks + + def _resize_bboxes(self, results: dict): + """Resize bounding boxes with ``results['scale_factor']``.""" + if results.get('gt_bboxes', None) is None: + return + results['gt_bboxes'].rescale_(results['scale_factor']) + + if len(results['pad_param']) != 4: + return + results['gt_bboxes'].translate_( + (results['pad_param'][2], results['pad_param'][0])) + + if self.clip_object_border: + results['gt_bboxes'].clip_(results['img_shape']) + + def transform(self, results: dict) -> dict: + results = super().transform(results) + if 'scale_factor_origin' in results: + scale_factor_origin = results.pop('scale_factor_origin') + results['scale_factor'] = (results['scale_factor'][0] * + scale_factor_origin[0], + results['scale_factor'][1] * + scale_factor_origin[1]) + if 'pad_param_origin' in results: + pad_param_origin = results.pop('pad_param_origin') + results['pad_param'] += pad_param_origin + return results + + +# TODO: Check if it can be merged with mmdet.YOLOXHSVRandomAug +@TRANSFORMS.register_module() +class YOLOv5HSVRandomAug(BaseTransform): + """Apply HSV augmentation to image sequentially. 
+ + Required Keys: + + - img + + Modified Keys: + + - img + + Args: + hue_delta ([int, float]): delta of hue. Defaults to 0.015. + saturation_delta ([int, float]): delta of saturation. Defaults to 0.7. + value_delta ([int, float]): delta of value. Defaults to 0.4. + """ + + def __init__(self, + hue_delta: Union[int, float] = 0.015, + saturation_delta: Union[int, float] = 0.7, + value_delta: Union[int, float] = 0.4): + self.hue_delta = hue_delta + self.saturation_delta = saturation_delta + self.value_delta = value_delta + + def transform(self, results: dict) -> dict: + """The HSV augmentation transform function. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + hsv_gains = \ + random.uniform(-1, 1, 3) * \ + [self.hue_delta, self.saturation_delta, self.value_delta] + 1 + hue, sat, val = cv2.split( + cv2.cvtColor(results['img'], cv2.COLOR_BGR2HSV)) + + table_list = np.arange(0, 256, dtype=hsv_gains.dtype) + lut_hue = ((table_list * hsv_gains[0]) % 180).astype(np.uint8) + lut_sat = np.clip(table_list * hsv_gains[1], 0, 255).astype(np.uint8) + lut_val = np.clip(table_list * hsv_gains[2], 0, 255).astype(np.uint8) + + im_hsv = cv2.merge( + (cv2.LUT(hue, lut_hue), cv2.LUT(sat, + lut_sat), cv2.LUT(val, lut_val))) + results['img'] = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(hue_delta={self.hue_delta}, ' + repr_str += f'saturation_delta={self.saturation_delta}, ' + repr_str += f'value_delta={self.value_delta})' + return repr_str + + +@TRANSFORMS.register_module() +class LoadAnnotations(MMDET_LoadAnnotations): + """Because the yolo series does not need to consider ignore bboxes for the + time being, in order to speed up the pipeline, it can be excluded in + advance. + + Args: + mask2bbox (bool): Whether to use mask annotation to get bbox. + Defaults to False. + poly2mask (bool): Whether to transform the polygons to bitmaps. + Defaults to False. + merge_polygons (bool): Whether to merge polygons into one polygon. + If merged, the storage structure is simpler and training is more + effcient, especially if the mask inside a bbox is divided into + multiple polygons. Defaults to True. + """ + + def __init__(self, + mask2bbox: bool = False, + poly2mask: bool = False, + merge_polygons: bool = True, + **kwargs): + self.mask2bbox = mask2bbox + self.merge_polygons = merge_polygons + assert not poly2mask, 'Does not support BitmapMasks considering ' \ + 'that bitmap consumes more memory.' + super().__init__(poly2mask=poly2mask, **kwargs) + if self.mask2bbox: + assert self.with_mask, 'Using mask2bbox requires ' \ + 'with_mask is True.' + self._mask_ignore_flag = None + + def transform(self, results: dict) -> dict: + """Function to load multiple types annotations. + + Args: + results (dict): Result dict from :obj:``mmengine.BaseDataset``. + + Returns: + dict: The dict contains loaded bounding box, label and + semantic segmentation. 
+ """ + if self.mask2bbox: + self._load_masks(results) + if self.with_label: + self._load_labels(results) + self._update_mask_ignore_data(results) + gt_bboxes = results['gt_masks'].get_bboxes(dst_type='hbox') + results['gt_bboxes'] = gt_bboxes + elif self.with_keypoints: + self._load_kps(results) + _, box_type_cls = get_box_type(self.box_type) + results['gt_bboxes'] = box_type_cls( + results.get('bbox', []), dtype=torch.float32) + else: + results = super().transform(results) + self._update_mask_ignore_data(results) + return results + + def _update_mask_ignore_data(self, results: dict) -> None: + if 'gt_masks' not in results: + return + + if 'gt_bboxes_labels' in results and len( + results['gt_bboxes_labels']) != len(results['gt_masks']): + assert len(results['gt_bboxes_labels']) == len( + self._mask_ignore_flag) + results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ + self._mask_ignore_flag] + + if 'gt_bboxes' in results and len(results['gt_bboxes']) != len( + results['gt_masks']): + assert len(results['gt_bboxes']) == len(self._mask_ignore_flag) + results['gt_bboxes'] = results['gt_bboxes'][self._mask_ignore_flag] + + def _load_bboxes(self, results: dict): + """Private function to load bounding box annotations. + Note: BBoxes with ignore_flag of 1 is not considered. + Args: + results (dict): Result dict from :obj:``mmengine.BaseDataset``. + + Returns: + dict: The dict contains loaded bounding box annotations. + """ + gt_bboxes = [] + gt_ignore_flags = [] + for instance in results.get('instances', []): + if instance['ignore_flag'] == 0: + gt_bboxes.append(instance['bbox']) + gt_ignore_flags.append(instance['ignore_flag']) + results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool) + + if self.box_type is None: + results['gt_bboxes'] = np.array( + gt_bboxes, dtype=np.float32).reshape((-1, 4)) + else: + _, box_type_cls = get_box_type(self.box_type) + results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32) + + def _load_labels(self, results: dict): + """Private function to load label annotations. + + Note: BBoxes with ignore_flag of 1 is not considered. + Args: + results (dict): Result dict from :obj:``mmengine.BaseDataset``. + Returns: + dict: The dict contains loaded label annotations. + """ + gt_bboxes_labels = [] + for instance in results.get('instances', []): + if instance['ignore_flag'] == 0: + gt_bboxes_labels.append(instance['bbox_label']) + results['gt_bboxes_labels'] = np.array( + gt_bboxes_labels, dtype=np.int64) + + def _load_masks(self, results: dict) -> None: + """Private function to load mask annotations. + + Args: + results (dict): Result dict from :obj:``mmengine.BaseDataset``. 
+ """ + gt_masks = [] + gt_ignore_flags = [] + self._mask_ignore_flag = [] + for instance in results.get('instances', []): + if instance['ignore_flag'] == 0: + if 'mask' in instance: + gt_mask = instance['mask'] + if isinstance(gt_mask, list): + gt_mask = [ + np.array(polygon) for polygon in gt_mask + if len(polygon) % 2 == 0 and len(polygon) >= 6 + ] + if len(gt_mask) == 0: + # ignore + self._mask_ignore_flag.append(0) + else: + if len(gt_mask) > 1 and self.merge_polygons: + gt_mask = self.merge_multi_segment(gt_mask) + gt_masks.append(gt_mask) + gt_ignore_flags.append(instance['ignore_flag']) + self._mask_ignore_flag.append(1) + else: + raise NotImplementedError( + 'Only supports mask annotations in polygon ' + 'format currently') + else: + # TODO: Actually, gt with bbox and without mask needs + # to be retained + self._mask_ignore_flag.append(0) + self._mask_ignore_flag = np.array(self._mask_ignore_flag, dtype=bool) + results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool) + + h, w = results['ori_shape'] + gt_masks = PolygonMasks([mask for mask in gt_masks], h, w) + results['gt_masks'] = gt_masks + + def merge_multi_segment(self, + gt_masks: List[np.ndarray]) -> List[np.ndarray]: + """Merge multi segments to one list. + + Find the coordinates with min distance between each segment, + then connect these coordinates with one thin line to merge all + segments into one. + Args: + gt_masks(List(np.array)): + original segmentations in coco's json file. + like [segmentation1, segmentation2,...], + each segmentation is a list of coordinates. + Return: + gt_masks(List(np.array)): merged gt_masks + """ + s = [] + segments = [np.array(i).reshape(-1, 2) for i in gt_masks] + idx_list = [[] for _ in range(len(gt_masks))] + + # record the indexes with min distance between each segment + for i in range(1, len(segments)): + idx1, idx2 = self.min_index(segments[i - 1], segments[i]) + idx_list[i - 1].append(idx1) + idx_list[i].append(idx2) + + # use two round to connect all the segments + # first round: first to end, i.e. A->B(partial)->C + # second round: end to first, i.e. C->B(remaining)-A + for k in range(2): + # forward first round + if k == 0: + for i, idx in enumerate(idx_list): + # middle segments have two indexes + # reverse the index of middle segments + if len(idx) == 2 and idx[0] > idx[1]: + idx = idx[::-1] + segments[i] = segments[i][::-1, :] + # add the idx[0] point for connect next segment + segments[i] = np.roll(segments[i], -idx[0], axis=0) + segments[i] = np.concatenate( + [segments[i], segments[i][:1]]) + # deal with the first segment and the last one + if i in [0, len(idx_list) - 1]: + s.append(segments[i]) + # deal with the middle segment + # Note that in the first round, only partial segment + # are appended. + else: + idx = [0, idx[1] - idx[0]] + s.append(segments[i][idx[0]:idx[1] + 1]) + # forward second round + else: + for i in range(len(idx_list) - 1, -1, -1): + # deal with the middle segment + # append the remaining points + if i not in [0, len(idx_list) - 1]: + idx = idx_list[i] + nidx = abs(idx[1] - idx[0]) + s.append(segments[i][nidx:]) + return [np.concatenate(s).reshape(-1, )] + + def min_index(self, arr1: np.ndarray, arr2: np.ndarray) -> Tuple[int, int]: + """Find a pair of indexes with the shortest distance. + + Args: + arr1: (N, 2). + arr2: (M, 2). + Return: + tuple: a pair of indexes. 
+ """ + dis = ((arr1[:, None, :] - arr2[None, :, :])**2).sum(-1) + return np.unravel_index(np.argmin(dis, axis=None), dis.shape) + + def _load_kps(self, results: dict) -> None: + """Private function to load keypoints annotations. + + Args: + results (dict): Result dict from + :class:`mmengine.dataset.BaseDataset`. + + Returns: + dict: The dict contains loaded keypoints annotations. + """ + results['height'] = results['img_shape'][0] + results['width'] = results['img_shape'][1] + num_instances = len(results.get('bbox', [])) + + if num_instances == 0: + results['keypoints'] = np.empty( + (0, len(results['flip_indices']), 2), dtype=np.float32) + results['keypoints_visible'] = np.empty( + (0, len(results['flip_indices'])), dtype=np.int32) + results['category_id'] = [] + + results['gt_keypoints'] = Keypoints( + keypoints=results['keypoints'], + keypoints_visible=results['keypoints_visible'], + flip_indices=results['flip_indices'], + ) + + results['gt_ignore_flags'] = np.array([False] * num_instances) + results['gt_bboxes_labels'] = np.array(results['category_id']) - 1 + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(with_bbox={self.with_bbox}, ' + repr_str += f'with_label={self.with_label}, ' + repr_str += f'with_mask={self.with_mask}, ' + repr_str += f'with_seg={self.with_seg}, ' + repr_str += f'mask2bbox={self.mask2bbox}, ' + repr_str += f'poly2mask={self.poly2mask}, ' + repr_str += f"imdecode_backend='{self.imdecode_backend}', " + repr_str += f'backend_args={self.backend_args})' + return repr_str + + +@TRANSFORMS.register_module() +class YOLOv5RandomAffine(BaseTransform): + """Random affine transform data augmentation in YOLOv5 and YOLOv8. It is + different from the implementation in YOLOX. + + This operation randomly generates affine transform matrix which including + rotation, translation, shear and scaling transforms. + If you set use_mask_refine == True, the code will use the masks + annotation to refine the bbox. + Our implementation is slightly different from the official. In COCO + dataset, a gt may have multiple mask tags. The official YOLOv5 + annotation file already combines the masks that an object has, + but our code takes into account the fact that an object has multiple masks. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - gt_masks (PolygonMasks) (optional) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + - gt_masks (PolygonMasks) (optional) + + Args: + max_rotate_degree (float): Maximum degrees of rotation transform. + Defaults to 10. + max_translate_ratio (float): Maximum ratio of translation. + Defaults to 0.1. + scaling_ratio_range (tuple[float]): Min and max ratio of + scaling transform. Defaults to (0.5, 1.5). + max_shear_degree (float): Maximum degrees of shear + transform. Defaults to 2. + border (tuple[int]): Distance from width and height sides of input + image to adjust output shape. Only used in mosaic dataset. + Defaults to (0, 0). + border_val (tuple[int]): Border padding values of 3 channels. + Defaults to (114, 114, 114). + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. 
+ min_bbox_size (float): Width and height threshold to filter bboxes. + If the height or width of a box is smaller than this value, it + will be removed. Defaults to 2. + min_area_ratio (float): Threshold of area ratio between + original bboxes and wrapped bboxes. If smaller than this value, + the box will be removed. Defaults to 0.1. + use_mask_refine (bool): Whether to refine bbox by mask. Deprecated. + max_aspect_ratio (float): Aspect ratio of width and height + threshold to filter bboxes. If max(h/w, w/h) larger than this + value, the box will be removed. Defaults to 20. + resample_num (int): Number of poly to resample to. + """ + + def __init__(self, + max_rotate_degree: float = 10.0, + max_translate_ratio: float = 0.1, + scaling_ratio_range: Tuple[float, float] = (0.5, 1.5), + max_shear_degree: float = 2.0, + border: Tuple[int, int] = (0, 0), + border_val: Tuple[int, int, int] = (114, 114, 114), + bbox_clip_border: bool = True, + min_bbox_size: int = 2, + min_area_ratio: float = 0.1, + use_mask_refine: bool = False, + max_aspect_ratio: float = 20., + resample_num: int = 1000): + assert 0 <= max_translate_ratio <= 1 + assert scaling_ratio_range[0] <= scaling_ratio_range[1] + assert scaling_ratio_range[0] > 0 + self.max_rotate_degree = max_rotate_degree + self.max_translate_ratio = max_translate_ratio + self.scaling_ratio_range = scaling_ratio_range + self.max_shear_degree = max_shear_degree + self.border = border + self.border_val = border_val + self.bbox_clip_border = bbox_clip_border + self.min_bbox_size = min_bbox_size + self.min_area_ratio = min_area_ratio + # The use_mask_refine parameter has been deprecated. + self.use_mask_refine = use_mask_refine + self.max_aspect_ratio = max_aspect_ratio + self.resample_num = resample_num + + @autocast_box_type() + def transform(self, results: dict) -> dict: + """The YOLOv5 random affine transform function. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + img = results['img'] + # self.border is wh format + height = img.shape[0] + self.border[1] * 2 + width = img.shape[1] + self.border[0] * 2 + + # Note: Different from YOLOX + center_matrix = np.eye(3, dtype=np.float32) + center_matrix[0, 2] = -img.shape[1] / 2 + center_matrix[1, 2] = -img.shape[0] / 2 + + warp_matrix, scaling_ratio = self._get_random_homography_matrix( + height, width) + warp_matrix = warp_matrix @ center_matrix + + img = cv2.warpPerspective( + img, + warp_matrix, + dsize=(width, height), + borderValue=self.border_val) + results['img'] = img + results['img_shape'] = img.shape + img_h, img_w = img.shape[:2] + + bboxes = results['gt_bboxes'] + num_bboxes = len(bboxes) + if num_bboxes: + orig_bboxes = bboxes.clone() + orig_bboxes.rescale_([scaling_ratio, scaling_ratio]) + if 'gt_masks' in results: + # If the dataset has annotations of mask, + # the mask will be used to refine bbox. 
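+                # The refinement below resamples every polygon to a fixed
+                # number of points (resample_num), warps those points with the
+                # same homography applied to the image, and rebuilds tight
+                # horizontal boxes from the warped polygons via segment2box;
+                # boxes whose size, area ratio or aspect ratio become invalid
+                # are dropped together with their masks.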
+ gt_masks = results['gt_masks'] + + gt_masks_resample = self.resample_masks(gt_masks) + gt_masks = self.warp_mask(gt_masks_resample, warp_matrix, + img_h, img_w) + + # refine bboxes by masks + bboxes = self.segment2box(gt_masks, height, width) + # filter bboxes outside image + valid_index = self.filter_gt_bboxes(orig_bboxes, + bboxes).numpy() + if self.bbox_clip_border: + bboxes.clip_([height - 1e-3, width - 1e-3]) + gt_masks = self.clip_polygons(gt_masks, height, width) + results['gt_masks'] = gt_masks[valid_index] + else: + bboxes.project_(warp_matrix) + if self.bbox_clip_border: + bboxes.clip_([height, width]) + + # filter bboxes + # Be careful: valid_index must convert to numpy, + # otherwise it will raise out of bounds when len(valid_index)=1 + valid_index = self.filter_gt_bboxes(orig_bboxes, + bboxes).numpy() + + results['gt_bboxes'] = bboxes[valid_index] + results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ + valid_index] + results['gt_ignore_flags'] = results['gt_ignore_flags'][ + valid_index] + else: + if 'gt_masks' in results: + results['gt_masks'] = PolygonMasks([], img_h, img_w) + + return results + + def segment2box(self, gt_masks: PolygonMasks, height: int, + width: int) -> HorizontalBoxes: + """ + Convert 1 segment label to 1 box label, applying inside-image + constraint i.e. (xy1, xy2, ...) to (xyxy) + Args: + gt_masks (torch.Tensor): the segment label + width (int): the width of the image. Defaults to 640 + height (int): The height of the image. Defaults to 640 + Returns: + HorizontalBoxes: the clip bboxes from gt_masks. + """ + bboxes = [] + for _, poly_per_obj in enumerate(gt_masks): + # simply use a number that is big enough for comparison with + # coordinates + xy_min = np.array([width * 2, height * 2], dtype=np.float32) + xy_max = np.zeros(2, dtype=np.float32) - 1 + + for p in poly_per_obj: + xy = np.array(p).reshape(-1, 2).astype(np.float32) + x, y = xy.T + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y = x[inside], y[inside] + if not any(x): + continue + xy = np.stack([x, y], axis=0).T + + xy_min = np.minimum(xy_min, np.min(xy, axis=0)) + xy_max = np.maximum(xy_max, np.max(xy, axis=0)) + if xy_max[0] == -1: + bbox = np.zeros(4, dtype=np.float32) + else: + bbox = np.concatenate([xy_min, xy_max], axis=0) + bboxes.append(bbox) + + return HorizontalBoxes(np.stack(bboxes, axis=0)) + + # TODO: Move to mmdet + def clip_polygons(self, gt_masks: PolygonMasks, height: int, + width: int) -> PolygonMasks: + """Function to clip points of polygons with height and width. + + Args: + gt_masks (PolygonMasks): Annotations of instance segmentation. + height (int): height of clip border. + width (int): width of clip border. + Return: + clipped_masks (PolygonMasks): + Clip annotations of instance segmentation. + """ + if len(gt_masks) == 0: + clipped_masks = PolygonMasks([], height, width) + else: + clipped_masks = [] + for poly_per_obj in gt_masks: + clipped_poly_per_obj = [] + for p in poly_per_obj: + p = p.copy() + p[0::2] = p[0::2].clip(0, width) + p[1::2] = p[1::2].clip(0, height) + clipped_poly_per_obj.append(p) + clipped_masks.append(clipped_poly_per_obj) + clipped_masks = PolygonMasks(clipped_masks, height, width) + return clipped_masks + + @staticmethod + def warp_poly(poly: np.ndarray, warp_matrix: np.ndarray, img_w: int, + img_h: int) -> np.ndarray: + """Function to warp one mask and filter points outside image. + + Args: + poly (np.ndarray): Segmentation annotation with shape (n, ) and + with format (x1, y1, x2, y2, ...). 
+ warp_matrix (np.ndarray): Affine transformation matrix. + Shape: (3, 3). + img_w (int): Width of output image. + img_h (int): Height of output image. + """ + # TODO: Current logic may cause retained masks unusable for + # semantic segmentation training, which is same as official + # implementation. + poly = poly.reshape((-1, 2)) + poly = np.concatenate((poly, np.ones( + (len(poly), 1), dtype=poly.dtype)), + axis=-1) + # transform poly + poly = poly @ warp_matrix.T + poly = poly[:, :2] / poly[:, 2:3] + + return poly.reshape(-1) + + def warp_mask(self, gt_masks: PolygonMasks, warp_matrix: np.ndarray, + img_w: int, img_h: int) -> PolygonMasks: + """Warp masks by warp_matrix and retain masks inside image after + warping. + + Args: + gt_masks (PolygonMasks): Annotations of semantic segmentation. + warp_matrix (np.ndarray): Affine transformation matrix. + Shape: (3, 3). + img_w (int): Width of output image. + img_h (int): Height of output image. + + Returns: + PolygonMasks: Masks after warping. + """ + masks = gt_masks.masks + + new_masks = [] + for poly_per_obj in masks: + warpped_poly_per_obj = [] + # One gt may have multiple masks. + for poly in poly_per_obj: + valid_poly = self.warp_poly(poly, warp_matrix, img_w, img_h) + if len(valid_poly): + warpped_poly_per_obj.append(valid_poly.reshape(-1)) + # If all the masks are invalid, + # add [0, 0, 0, 0, 0, 0,] here. + if not warpped_poly_per_obj: + # This will be filtered in function `filter_gt_bboxes`. + warpped_poly_per_obj = [ + np.zeros(6, dtype=poly_per_obj[0].dtype) + ] + new_masks.append(warpped_poly_per_obj) + + gt_masks = PolygonMasks(new_masks, img_h, img_w) + return gt_masks + + def resample_masks(self, gt_masks: PolygonMasks) -> PolygonMasks: + """Function to resample each mask annotation with shape (2 * n, ) to + shape (resample_num * 2, ). + + Args: + gt_masks (PolygonMasks): Annotations of semantic segmentation. + """ + masks = gt_masks.masks + new_masks = [] + for poly_per_obj in masks: + resample_poly_per_obj = [] + for poly in poly_per_obj: + poly = poly.reshape((-1, 2)) # xy + poly = np.concatenate((poly, poly[0:1, :]), axis=0) + x = np.linspace(0, len(poly) - 1, self.resample_num) + xp = np.arange(len(poly)) + poly = np.concatenate([ + np.interp(x, xp, poly[:, i]) for i in range(2) + ]).reshape(2, -1).T.reshape(-1) + resample_poly_per_obj.append(poly) + new_masks.append(resample_poly_per_obj) + return PolygonMasks(new_masks, gt_masks.height, gt_masks.width) + + def filter_gt_bboxes(self, origin_bboxes: HorizontalBoxes, + wrapped_bboxes: HorizontalBoxes) -> torch.Tensor: + """Filter gt bboxes. + + Args: + origin_bboxes (HorizontalBoxes): Origin bboxes. + wrapped_bboxes (HorizontalBoxes): Wrapped bboxes + + Returns: + dict: The result dict. + """ + origin_w = origin_bboxes.widths + origin_h = origin_bboxes.heights + wrapped_w = wrapped_bboxes.widths + wrapped_h = wrapped_bboxes.heights + aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16), + wrapped_h / (wrapped_w + 1e-16)) + + wh_valid_idx = (wrapped_w > self.min_bbox_size) & \ + (wrapped_h > self.min_bbox_size) + area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h + + 1e-16) > self.min_area_ratio + aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio + return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx + + @cache_randomness + def _get_random_homography_matrix(self, height: int, + width: int) -> Tuple[np.ndarray, float]: + """Get random homography matrix. + + Args: + height (int): Image height. + width (int): Image width. 
+ + Returns: + Tuple[np.ndarray, float]: The result of warp_matrix and + scaling_ratio. + """ + # Rotation + rotation_degree = random.uniform(-self.max_rotate_degree, + self.max_rotate_degree) + rotation_matrix = self._get_rotation_matrix(rotation_degree) + + # Scaling + scaling_ratio = random.uniform(self.scaling_ratio_range[0], + self.scaling_ratio_range[1]) + scaling_matrix = self._get_scaling_matrix(scaling_ratio) + + # Shear + x_degree = random.uniform(-self.max_shear_degree, + self.max_shear_degree) + y_degree = random.uniform(-self.max_shear_degree, + self.max_shear_degree) + shear_matrix = self._get_shear_matrix(x_degree, y_degree) + + # Translation + trans_x = random.uniform(0.5 - self.max_translate_ratio, + 0.5 + self.max_translate_ratio) * width + trans_y = random.uniform(0.5 - self.max_translate_ratio, + 0.5 + self.max_translate_ratio) * height + translate_matrix = self._get_translation_matrix(trans_x, trans_y) + warp_matrix = ( + translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix) + return warp_matrix, scaling_ratio + + @staticmethod + def _get_rotation_matrix(rotate_degrees: float) -> np.ndarray: + """Get rotation matrix. + + Args: + rotate_degrees (float): Rotate degrees. + + Returns: + np.ndarray: The rotation matrix. + """ + radian = math.radians(rotate_degrees) + rotation_matrix = np.array( + [[np.cos(radian), -np.sin(radian), 0.], + [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]], + dtype=np.float32) + return rotation_matrix + + @staticmethod + def _get_scaling_matrix(scale_ratio: float) -> np.ndarray: + """Get scaling matrix. + + Args: + scale_ratio (float): Scale ratio. + + Returns: + np.ndarray: The scaling matrix. + """ + scaling_matrix = np.array( + [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], + dtype=np.float32) + return scaling_matrix + + @staticmethod + def _get_shear_matrix(x_shear_degrees: float, + y_shear_degrees: float) -> np.ndarray: + """Get shear matrix. + + Args: + x_shear_degrees (float): X shear degrees. + y_shear_degrees (float): Y shear degrees. + + Returns: + np.ndarray: The shear matrix. + """ + x_radian = math.radians(x_shear_degrees) + y_radian = math.radians(y_shear_degrees) + shear_matrix = np.array([[1, np.tan(x_radian), 0.], + [np.tan(y_radian), 1, 0.], [0., 0., 1.]], + dtype=np.float32) + return shear_matrix + + @staticmethod + def _get_translation_matrix(x: float, y: float) -> np.ndarray: + """Get translation matrix. + + Args: + x (float): X translation. + y (float): Y translation. + + Returns: + np.ndarray: The translation matrix. + """ + translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]], + dtype=np.float32) + return translation_matrix + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(max_rotate_degree={self.max_rotate_degree}, ' + repr_str += f'max_translate_ratio={self.max_translate_ratio}, ' + repr_str += f'scaling_ratio_range={self.scaling_ratio_range}, ' + repr_str += f'max_shear_degree={self.max_shear_degree}, ' + repr_str += f'border={self.border}, ' + repr_str += f'border_val={self.border_val}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@TRANSFORMS.register_module() +class PPYOLOERandomDistort(BaseTransform): + """Random hue, saturation, contrast and brightness distortion. + + Required Keys: + + - img + + Modified Keys: + + - img (np.float32) + + Args: + hue_cfg (dict): Hue settings. Defaults to dict(min=-18, + max=18, prob=0.5). + saturation_cfg (dict): Saturation settings. 
Defaults to dict( + min=0.5, max=1.5, prob=0.5). + contrast_cfg (dict): Contrast settings. Defaults to dict( + min=0.5, max=1.5, prob=0.5). + brightness_cfg (dict): Brightness settings. Defaults to dict( + min=0.5, max=1.5, prob=0.5). + num_distort_func (int): The number of distort function. Defaults + to 4. + """ + + def __init__(self, + hue_cfg: dict = dict(min=-18, max=18, prob=0.5), + saturation_cfg: dict = dict(min=0.5, max=1.5, prob=0.5), + contrast_cfg: dict = dict(min=0.5, max=1.5, prob=0.5), + brightness_cfg: dict = dict(min=0.5, max=1.5, prob=0.5), + num_distort_func: int = 4): + self.hue_cfg = hue_cfg + self.saturation_cfg = saturation_cfg + self.contrast_cfg = contrast_cfg + self.brightness_cfg = brightness_cfg + self.num_distort_func = num_distort_func + assert 0 < self.num_distort_func <= 4, \ + 'num_distort_func must > 0 and <= 4' + for cfg in [ + self.hue_cfg, self.saturation_cfg, self.contrast_cfg, + self.brightness_cfg + ]: + assert 0. <= cfg['prob'] <= 1., 'prob must >=0 and <=1' + + def transform_hue(self, results): + """Transform hue randomly.""" + if random.uniform(0., 1.) >= self.hue_cfg['prob']: + return results + img = results['img'] + delta = random.uniform(self.hue_cfg['min'], self.hue_cfg['max']) + u = np.cos(delta * np.pi) + w = np.sin(delta * np.pi) + delta_iq = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]]) + rgb2yiq_matrix = np.array([[0.114, 0.587, 0.299], + [-0.321, -0.274, 0.596], + [0.311, -0.523, 0.211]]) + yiq2rgb_matric = np.array([[1.0, -1.107, 1.705], [1.0, -0.272, -0.647], + [1.0, 0.956, 0.621]]) + t = np.dot(np.dot(yiq2rgb_matric, delta_iq), rgb2yiq_matrix).T + img = np.dot(img, t) + results['img'] = img + return results + + def transform_saturation(self, results): + """Transform saturation randomly.""" + if random.uniform(0., 1.) >= self.saturation_cfg['prob']: + return results + img = results['img'] + delta = random.uniform(self.saturation_cfg['min'], + self.saturation_cfg['max']) + + # convert bgr img to gray img + gray = img * np.array([[[0.114, 0.587, 0.299]]], dtype=np.float32) + gray = gray.sum(axis=2, keepdims=True) + gray *= (1.0 - delta) + img *= delta + img += gray + results['img'] = img + return results + + def transform_contrast(self, results): + """Transform contrast randomly.""" + if random.uniform(0., 1.) >= self.contrast_cfg['prob']: + return results + img = results['img'] + delta = random.uniform(self.contrast_cfg['min'], + self.contrast_cfg['max']) + img *= delta + results['img'] = img + return results + + def transform_brightness(self, results): + """Transform brightness randomly.""" + if random.uniform(0., 1.) >= self.brightness_cfg['prob']: + return results + img = results['img'] + delta = random.uniform(self.brightness_cfg['min'], + self.brightness_cfg['max']) + img += delta + results['img'] = img + return results + + def transform(self, results: dict) -> dict: + """The hue, saturation, contrast and brightness distortion function. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. 
+ """ + results['img'] = results['img'].astype(np.float32) + + functions = [ + self.transform_brightness, self.transform_contrast, + self.transform_saturation, self.transform_hue + ] + distortions = random.permutation(functions)[:self.num_distort_func] + for func in distortions: + results = func(results) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(hue_cfg={self.hue_cfg}, ' + repr_str += f'saturation_cfg={self.saturation_cfg}, ' + repr_str += f'contrast_cfg={self.contrast_cfg}, ' + repr_str += f'brightness_cfg={self.brightness_cfg}, ' + repr_str += f'num_distort_func={self.num_distort_func})' + return repr_str + + +@TRANSFORMS.register_module() +class PPYOLOERandomCrop(BaseTransform): + """Random crop the img and bboxes. Different thresholds are used in PPYOLOE + to judge whether the clipped image meets the requirements. This + implementation is different from the implementation of RandomCrop in mmdet. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + Added Keys: + - pad_param (np.float32) + + Args: + aspect_ratio (List[float]): Aspect ratio of cropped region. Default to + [.5, 2]. + thresholds (List[float]): Iou thresholds for deciding a valid bbox crop + in [min, max] format. Defaults to [.0, .1, .3, .5, .7, .9]. + scaling (List[float]): Ratio between a cropped region and the original + image in [min, max] format. Default to [.3, 1.]. + num_attempts (int): Number of tries for each threshold before + giving up. Default to 50. + allow_no_crop (bool): Allow return without actually cropping them. + Default to True. + cover_all_box (bool): Ensure all bboxes are covered in the final crop. + Default to False. + """ + + def __init__(self, + aspect_ratio: List[float] = [.5, 2.], + thresholds: List[float] = [.0, .1, .3, .5, .7, .9], + scaling: List[float] = [.3, 1.], + num_attempts: int = 50, + allow_no_crop: bool = True, + cover_all_box: bool = False): + self.aspect_ratio = aspect_ratio + self.thresholds = thresholds + self.scaling = scaling + self.num_attempts = num_attempts + self.allow_no_crop = allow_no_crop + self.cover_all_box = cover_all_box + + def _crop_data(self, results: dict, crop_box: Tuple[int, int, int, int], + valid_inds: np.ndarray) -> Union[dict, None]: + """Function to randomly crop images, bounding boxes, masks, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + crop_box (Tuple[int, int, int, int]): Expected absolute coordinates + for cropping, (x1, y1, x2, y2). + valid_inds (np.ndarray): The indexes of gt that needs to be + retained. + + Returns: + results (Union[dict, None]): Randomly cropped results, 'img_shape' + key in result dict is updated according to crop size. None will + be returned when there is no valid bbox after cropping. + """ + # crop the image + img = results['img'] + crop_x1, crop_y1, crop_x2, crop_y2 = crop_box + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] 
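+        # Worked example with a hypothetical crop_box = (100, 50, 500, 450):
+        # the bbox handling below translates a gt box (120, 60, 200, 150) by
+        # (-100, -50) to (20, 10, 100, 100) and clips it to the 400 x 400
+        # cropped image; only boxes selected by valid_inds (center inside the
+        # crop and non-degenerate after cropping) are kept.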
+ results['img'] = img + img_shape = img.shape + results['img_shape'] = img.shape + + # crop bboxes accordingly and clip to the image boundary + if results.get('gt_bboxes', None) is not None: + bboxes = results['gt_bboxes'] + bboxes.translate_([-crop_x1, -crop_y1]) + bboxes.clip_(img_shape[:2]) + + results['gt_bboxes'] = bboxes[valid_inds] + + if results.get('gt_ignore_flags', None) is not None: + results['gt_ignore_flags'] = \ + results['gt_ignore_flags'][valid_inds] + + if results.get('gt_bboxes_labels', None) is not None: + results['gt_bboxes_labels'] = \ + results['gt_bboxes_labels'][valid_inds] + + if results.get('gt_masks', None) is not None: + results['gt_masks'] = results['gt_masks'][ + valid_inds.nonzero()[0]].crop( + np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) + + # crop semantic seg + if results.get('gt_seg_map', None) is not None: + results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2, + crop_x1:crop_x2] + + return results + + @autocast_box_type() + def transform(self, results: dict) -> Union[dict, None]: + """The random crop transform function. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + if results.get('gt_bboxes', None) is None or len( + results['gt_bboxes']) == 0: + return results + + orig_img_h, orig_img_w = results['img'].shape[:2] + gt_bboxes = results['gt_bboxes'] + + thresholds = list(self.thresholds) + if self.allow_no_crop: + thresholds.append('no_crop') + random.shuffle(thresholds) + + for thresh in thresholds: + # Determine the coordinates for cropping + if thresh == 'no_crop': + return results + + found = False + for i in range(self.num_attempts): + crop_h, crop_w = self._get_crop_size((orig_img_h, orig_img_w)) + if self.aspect_ratio is None: + if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0: + continue + + # get image crop_box + margin_h = max(orig_img_h - crop_h, 0) + margin_w = max(orig_img_w - crop_w, 0) + offset_h, offset_w = self._rand_offset((margin_h, margin_w)) + crop_y1, crop_y2 = offset_h, offset_h + crop_h + crop_x1, crop_x2 = offset_w, offset_w + crop_w + + crop_box = [crop_x1, crop_y1, crop_x2, crop_y2] + # Calculate the iou between gt_bboxes and crop_boxes + iou = self._iou_matrix(gt_bboxes, + np.array([crop_box], dtype=np.float32)) + # If the maximum value of the iou is less than thresh, + # the current crop_box is considered invalid. + if iou.max() < thresh: + continue + + # If cover_all_box == True and the minimum value of + # the iou is less than thresh, the current crop_box + # is considered invalid. + if self.cover_all_box and iou.min() < thresh: + continue + + # Get which gt_bboxes to keep after cropping. + valid_inds = self._get_valid_inds( + gt_bboxes, np.array(crop_box, dtype=np.float32)) + if valid_inds.size > 0: + found = True + break + + if found: + results = self._crop_data(results, crop_box, valid_inds) + return results + return results + + @cache_randomness + def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]: + """Randomly generate crop offset. + + Args: + margin (Tuple[int, int]): The upper bound for the offset generated + randomly. + + Returns: + Tuple[int, int]: The random offset for the crop. + """ + margin_h, margin_w = margin + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + + return (offset_h, offset_w) + + @cache_randomness + def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]: + """Randomly generates the crop size based on `image_size`. 
+ + Args: + image_size (Tuple[int, int]): (h, w). + + Returns: + crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels. + """ + h, w = image_size + scale = random.uniform(*self.scaling) + if self.aspect_ratio is not None: + min_ar, max_ar = self.aspect_ratio + aspect_ratio = random.uniform( + max(min_ar, scale**2), min(max_ar, scale**-2)) + h_scale = scale / np.sqrt(aspect_ratio) + w_scale = scale * np.sqrt(aspect_ratio) + else: + h_scale = random.uniform(*self.scaling) + w_scale = random.uniform(*self.scaling) + crop_h = h * h_scale + crop_w = w * w_scale + return int(crop_h), int(crop_w) + + def _iou_matrix(self, + gt_bbox: HorizontalBoxes, + crop_bbox: np.ndarray, + eps: float = 1e-10) -> np.ndarray: + """Calculate iou between gt and image crop box. + + Args: + gt_bbox (HorizontalBoxes): Ground truth bounding boxes. + crop_bbox (np.ndarray): Image crop coordinates in + [x1, y1, x2, y2] format. + eps (float): Default to 1e-10. + Return: + (np.ndarray): IoU. + """ + gt_bbox = gt_bbox.tensor.numpy() + lefttop = np.maximum(gt_bbox[:, np.newaxis, :2], crop_bbox[:, :2]) + rightbottom = np.minimum(gt_bbox[:, np.newaxis, 2:], crop_bbox[:, 2:]) + + overlap = np.prod( + rightbottom - lefttop, + axis=2) * (lefttop < rightbottom).all(axis=2) + area_gt_bbox = np.prod(gt_bbox[:, 2:] - gt_bbox[:, :2], axis=1) + area_crop_bbox = np.prod(crop_bbox[:, 2:] - crop_bbox[:, :2], axis=1) + area_o = (area_gt_bbox[:, np.newaxis] + area_crop_bbox - overlap) + return overlap / (area_o + eps) + + def _get_valid_inds(self, gt_bbox: HorizontalBoxes, + img_crop_bbox: np.ndarray) -> np.ndarray: + """Get which Bboxes to keep at the current cropping coordinates. + + Args: + gt_bbox (HorizontalBoxes): Ground truth bounding boxes. + img_crop_bbox (np.ndarray): Image crop coordinates in + [x1, y1, x2, y2] format. + + Returns: + (np.ndarray): Valid indexes. + """ + cropped_box = gt_bbox.tensor.numpy().copy() + gt_bbox = gt_bbox.tensor.numpy().copy() + + cropped_box[:, :2] = np.maximum(gt_bbox[:, :2], img_crop_bbox[:2]) + cropped_box[:, 2:] = np.minimum(gt_bbox[:, 2:], img_crop_bbox[2:]) + cropped_box[:, :2] -= img_crop_bbox[:2] + cropped_box[:, 2:] -= img_crop_bbox[:2] + + centers = (gt_bbox[:, :2] + gt_bbox[:, 2:]) / 2 + valid = np.logical_and(img_crop_bbox[:2] <= centers, + centers < img_crop_bbox[2:]).all(axis=1) + valid = np.logical_and( + valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1)) + + return np.where(valid)[0] + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(aspect_ratio={self.aspect_ratio}, ' + repr_str += f'thresholds={self.thresholds}, ' + repr_str += f'scaling={self.scaling}, ' + repr_str += f'num_attempts={self.num_attempts}, ' + repr_str += f'allow_no_crop={self.allow_no_crop}, ' + repr_str += f'cover_all_box={self.cover_all_box})' + return repr_str + + +@TRANSFORMS.register_module() +class YOLOv5CopyPaste(BaseTransform): + """Copy-Paste used in YOLOv5 and YOLOv8. + + This transform randomly copy some objects in the image to the mirror + position of the image.It is different from the `CopyPaste` in mmdet. + + Required Keys: + + - img (np.uint8) + - gt_bboxes (BaseBoxes[torch.float32]) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - gt_masks (PolygonMasks) (optional) + + Modified Keys: + + - img + - gt_bboxes + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (optional) + - gt_masks (optional) + + Args: + ioa_thresh (float): Ioa thresholds for deciding valid bbox. 
+ prob (float): Probability of choosing objects. + Defaults to 0.5. + """ + + def __init__(self, ioa_thresh: float = 0.3, prob: float = 0.5): + self.ioa_thresh = ioa_thresh + self.prob = prob + + @autocast_box_type() + def transform(self, results: dict) -> Union[dict, None]: + """The YOLOv5 and YOLOv8 Copy-Paste transform function. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + if len(results.get('gt_masks', [])) == 0: + return results + gt_masks = results['gt_masks'] + assert isinstance(gt_masks, PolygonMasks), \ + 'only support type of PolygonMasks,' \ + ' but get type: %s' % type(gt_masks) + gt_bboxes = results['gt_bboxes'] + gt_bboxes_labels = results.get('gt_bboxes_labels', None) + img = results['img'] + img_h, img_w = img.shape[:2] + + # calculate ioa + gt_bboxes_flip = deepcopy(gt_bboxes) + gt_bboxes_flip.flip_(img.shape) + + ioa = self.bbox_ioa(gt_bboxes_flip, gt_bboxes) + indexes = torch.nonzero((ioa < self.ioa_thresh).all(1))[:, 0] + n = len(indexes) + valid_inds = random.choice( + indexes, size=round(self.prob * n), replace=False) + if len(valid_inds) == 0: + return results + + if gt_bboxes_labels is not None: + # prepare labels + gt_bboxes_labels = np.concatenate( + (gt_bboxes_labels, gt_bboxes_labels[valid_inds]), axis=0) + + # prepare bboxes + copypaste_bboxes = gt_bboxes_flip[valid_inds] + gt_bboxes = gt_bboxes.cat([gt_bboxes, copypaste_bboxes]) + + # prepare images + copypaste_gt_masks = gt_masks[valid_inds] + copypaste_gt_masks_flip = copypaste_gt_masks.flip() + # convert poly format to bitmap format + # example: poly: [[array(0.0, 0.0, 10.0, 0.0, 10.0, 10.0, 0.0, 10.0]] + # -> bitmap: a mask with shape equal to (1, img_h, img_w) + # # type1 low speed + # copypaste_gt_masks_bitmap = copypaste_gt_masks.to_ndarray() + # copypaste_mask = np.sum(copypaste_gt_masks_bitmap, axis=0) > 0 + + # type2 + copypaste_mask = np.zeros((img_h, img_w), dtype=np.uint8) + for poly in copypaste_gt_masks.masks: + poly = [i.reshape((-1, 1, 2)).astype(np.int32) for i in poly] + cv2.drawContours(copypaste_mask, poly, -1, (1, ), cv2.FILLED) + + copypaste_mask = copypaste_mask.astype(bool) + + # copy objects, and paste to the mirror position of the image + copypaste_mask_flip = mmcv.imflip( + copypaste_mask, direction='horizontal') + copypaste_img = mmcv.imflip(img, direction='horizontal') + img[copypaste_mask_flip] = copypaste_img[copypaste_mask_flip] + + # prepare masks + gt_masks = copypaste_gt_masks.cat([gt_masks, copypaste_gt_masks_flip]) + + if 'gt_ignore_flags' in results: + # prepare gt_ignore_flags + gt_ignore_flags = results['gt_ignore_flags'] + gt_ignore_flags = np.concatenate( + [gt_ignore_flags, gt_ignore_flags[valid_inds]], axis=0) + results['gt_ignore_flags'] = gt_ignore_flags + + results['img'] = img + results['gt_bboxes'] = gt_bboxes + if gt_bboxes_labels is not None: + results['gt_bboxes_labels'] = gt_bboxes_labels + results['gt_masks'] = gt_masks + + return results + + @staticmethod + def bbox_ioa(gt_bboxes_flip: HorizontalBoxes, + gt_bboxes: HorizontalBoxes, + eps: float = 1e-7) -> np.ndarray: + """Calculate ioa between gt_bboxes_flip and gt_bboxes. + + Args: + gt_bboxes_flip (HorizontalBoxes): Flipped ground truth + bounding boxes. + gt_bboxes (HorizontalBoxes): Ground truth bounding boxes. + eps (float): Default to 1e-10. + Return: + (Tensor): Ioa. 
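+
+        Examples:
+            >>> # Illustrative sketch: the IoA of a box with itself is ~1.
+            >>> import torch
+            >>> from mmdet.structures.bbox import HorizontalBoxes
+            >>> box = HorizontalBoxes(torch.tensor([[0., 0., 10., 10.]]))
+            >>> ioa = YOLOv5CopyPaste.bbox_ioa(box, box)  # shape (1, 1)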
+ """ + gt_bboxes_flip = gt_bboxes_flip.tensor + gt_bboxes = gt_bboxes.tensor + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = gt_bboxes_flip.T + b2_x1, b2_y1, b2_x2, b2_y2 = gt_bboxes.T + + # Intersection area + inter_area = (torch.minimum(b1_x2[:, None], + b2_x2) - torch.maximum(b1_x1[:, None], + b2_x1)).clip(0) * \ + (torch.minimum(b1_y2[:, None], + b2_y2) - torch.maximum(b1_y1[:, None], + b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(ioa_thresh={self.ioa_thresh},' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class RemoveDataElement(BaseTransform): + """Remove unnecessary data element in results. + + Args: + keys (Union[str, Sequence[str]]): Keys need to be removed. + """ + + def __init__(self, keys: Union[str, Sequence[str]]): + self.keys = [keys] if isinstance(keys, str) else keys + + def transform(self, results: dict) -> dict: + for key in self.keys: + results.pop(key, None) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(keys={self.keys})' + return repr_str + + +@TRANSFORMS.register_module() +class RegularizeRotatedBox(BaseTransform): + """Regularize rotated boxes. + + Due to the angle periodicity, one rotated box can be represented in + many different (x, y, w, h, t). To make each rotated box unique, + ``regularize_boxes`` will take the remainder of the angle divided by + 180 degrees. + + For convenience, three angle_version can be used here: + + - 'oc': OpenCV Definition. Has the same box representation as + ``cv2.minAreaRect`` the angle ranges in [-90, 0). + - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90). + The width is always longer than the height. + - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135). + The width is always longer than the height. + + Required Keys: + + - gt_bboxes (RotatedBoxes[torch.float32]) + + Modified Keys: + + - gt_bboxes + + Args: + angle_version (str): Angle version. Can only be 'oc', + 'le90', or 'le135'. Defaults to 'le90. + """ + + def __init__(self, angle_version='le90') -> None: + self.angle_version = angle_version + try: + from mmrotate.structures.bbox import RotatedBoxes + self.box_type = RotatedBoxes + except ImportError: + raise ImportError( + 'Please run "mim install -r requirements/mmrotate.txt" ' + 'to install mmrotate first for rotated detection.') + + def transform(self, results: dict) -> dict: + assert isinstance(results['gt_bboxes'], self.box_type) + results['gt_bboxes'] = self.box_type( + results['gt_bboxes'].regularize_boxes(self.angle_version)) + return results + + +@TRANSFORMS.register_module() +class Polygon2Mask(BaseTransform): + """Polygons to bitmaps in YOLOv5. + + Args: + downsample_ratio (int): Downsample ratio of mask. + mask_overlap (bool): Whether to use maskoverlap in mask process. + When set to True, the implementation here is the same as the + official, with higher training speed. If set to True, all gt masks + will compress into one overlap mask, the value of mask indicates + the index of gt masks. If set to False, one mask is a binary mask. + Default to True. + coco_style (bool): Whether to use coco_style to convert the polygons to + bitmaps. Note that this option is only used to test if there is an + improvement in training speed and we recommend setting it to False. 
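+
+    Examples:
+        >>> # Illustrative pipeline entry; the values simply restate the
+        >>> # defaults documented above.
+        >>> pipeline = [
+        ...     dict(type='Polygon2Mask', downsample_ratio=4,
+        ...          mask_overlap=True, coco_style=False)
+        ... ]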
+ """ + + def __init__(self, + downsample_ratio: int = 4, + mask_overlap: bool = True, + coco_style: bool = False): + self.downsample_ratio = downsample_ratio + self.mask_overlap = mask_overlap + self.coco_style = coco_style + + def polygon2mask(self, + img_shape: Tuple[int, int], + polygons: np.ndarray, + color: int = 1) -> np.ndarray: + """ + Args: + img_shape (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). + color (int): color in fillPoly. + Return: + np.ndarray: the overlap mask. + """ + nh, nw = (img_shape[0] // self.downsample_ratio, + img_shape[1] // self.downsample_ratio) + if self.coco_style: + # This practice can lead to the loss of small objects + # polygons = polygons.resize((nh, nw)).masks + # polygons = np.asarray(polygons).reshape(-1) + # mask = polygon_to_bitmap([polygons], nh, nw) + + polygons = np.asarray(polygons).reshape(-1) + mask = polygon_to_bitmap([polygons], img_shape[0], + img_shape[1]).astype(np.uint8) + mask = mmcv.imresize(mask, (nw, nh)) + else: + mask = np.zeros(img_shape, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + # NOTE: fillPoly firstly then resize is trying the keep the same + # way of loss calculation when mask-ratio=1. + mask = mmcv.imresize(mask, (nw, nh)) + return mask + + def polygons2masks(self, + img_shape: Tuple[int, int], + polygons: PolygonMasks, + color: int = 1) -> np.ndarray: + """Return a list of bitmap masks. + + Args: + img_shape (tuple): The image size. + polygons (PolygonMasks): The mask annotations. + color (int): color in fillPoly. + Return: + List[np.ndarray]: the list of masks in bitmaps. + """ + if self.coco_style: + nh, nw = (img_shape[0] // self.downsample_ratio, + img_shape[1] // self.downsample_ratio) + masks = polygons.resize((nh, nw)).to_ndarray() + return masks + else: + masks = [] + for si in range(len(polygons)): + mask = self.polygon2mask(img_shape, polygons[si], color) + masks.append(mask) + return np.array(masks) + + def polygons2masks_overlap( + self, img_shape: Tuple[int, int], + polygons: PolygonMasks) -> Tuple[np.ndarray, np.ndarray]: + """Return a overlap mask and the sorted idx of area. + + Args: + img_shape (tuple): The image size. + polygons (PolygonMasks): The mask annotations. + color (int): color in fillPoly. + Return: + Tuple[np.ndarray, np.ndarray]: + the overlap mask and the sorted idx of area. 
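+
+        Examples:
+            >>> # Encoding sketch (inferred from the implementation below):
+            >>> # in the returned overlap mask, 0 is background and value k
+            >>> # roughly marks the k-th largest instance by area; the second
+            >>> # return value is that area-sorted index order.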
+ """ + masks = np.zeros((img_shape[0] // self.downsample_ratio, + img_shape[1] // self.downsample_ratio), + dtype=np.int32 if len(polygons) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(polygons)): + mask = self.polygon2mask(img_shape, polygons[si], color=1) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(polygons)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index + + def transform(self, results: dict) -> dict: + gt_masks = results['gt_masks'] + assert isinstance(gt_masks, PolygonMasks) + + if self.mask_overlap: + masks, sorted_idx = self.polygons2masks_overlap( + (gt_masks.height, gt_masks.width), gt_masks) + results['gt_bboxes'] = results['gt_bboxes'][sorted_idx] + results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ + sorted_idx] + + # In this case we put gt_masks in gt_panoptic_seg + results.pop('gt_masks') + results['gt_panoptic_seg'] = torch.from_numpy(masks[None]) + else: + masks = self.polygons2masks((gt_masks.height, gt_masks.width), + gt_masks, + color=1) + masks = torch.from_numpy(masks) + # Consistent logic with mmdet + results['gt_masks'] = masks + return results + + +@TRANSFORMS.register_module() +class FilterAnnotations(FilterDetAnnotations): + """Filter invalid annotations. + + In addition to the conditions checked by ``FilterDetAnnotations``, this + filter adds a new condition requiring instances to have at least one + visible keypoints. + """ + + def __init__(self, by_keypoints: bool = False, **kwargs) -> None: + # TODO: add more filter options + super().__init__(**kwargs) + self.by_keypoints = by_keypoints + + @autocast_box_type() + def transform(self, results: dict) -> Union[dict, None]: + """Transform function to filter annotations. + + Args: + results (dict): Result dict. + Returns: + dict: Updated result dict. 
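+
+        Examples:
+            >>> # Illustrative usage sketch; all arguments other than
+            >>> # ``by_keypoints`` come from the mmdet base class.
+            >>> transform = FilterAnnotations(
+            ...     min_gt_bbox_wh=(1, 1), by_box=True, by_keypoints=True)
+            >>> # The call may return None when nothing is kept and
+            >>> # ``keep_empty=True``, as implemented below.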
+ """ + assert 'gt_bboxes' in results + gt_bboxes = results['gt_bboxes'] + if gt_bboxes.shape[0] == 0: + return results + + tests = [] + if self.by_box: + tests.append( + ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) & + (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy()) + + if self.by_mask: + assert 'gt_masks' in results + gt_masks = results['gt_masks'] + tests.append(gt_masks.areas >= self.min_gt_mask_area) + + if self.by_keypoints: + assert 'gt_keypoints' in results + num_keypoints = results['gt_keypoints'].num_keypoints + tests.append((num_keypoints > 0).numpy()) + + keep = tests[0] + for t in tests[1:]: + keep = keep & t + + if not keep.any(): + if self.keep_empty: + return None + + keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags', + 'gt_keypoints') + for key in keys: + if key in results: + results[key] = results[key][keep] + + return results + + +# TODO: Check if it can be merged with mmdet.YOLOXHSVRandomAug +@TRANSFORMS.register_module() +class RandomAffine(MMDET_RandomAffine): + + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + + @autocast_box_type() + def transform(self, results: dict) -> dict: + img = results['img'] + height = img.shape[0] + self.border[1] * 2 + width = img.shape[1] + self.border[0] * 2 + + warp_matrix = self._get_random_homography_matrix(height, width) + + img = cv2.warpPerspective( + img, + warp_matrix, + dsize=(width, height), + borderValue=self.border_val) + results['img'] = img + results['img_shape'] = img.shape + + bboxes = results['gt_bboxes'] + num_bboxes = len(bboxes) + if num_bboxes: + bboxes.project_(warp_matrix) + if self.bbox_clip_border: + bboxes.clip_([height, width]) + # remove outside bbox + valid_index = bboxes.is_inside([height, width]).numpy() + results['gt_bboxes'] = bboxes[valid_index] + results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ + valid_index] + results['gt_ignore_flags'] = results['gt_ignore_flags'][ + valid_index] + + if 'gt_masks' in results: + raise NotImplementedError('RandomAffine only supports bbox.') + + if 'gt_keypoints' in results: + keypoints = results['gt_keypoints'] + keypoints.project_(warp_matrix) + if self.bbox_clip_border: + keypoints.clip_([height, width]) + results['gt_keypoints'] = keypoints[valid_index] + + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(hue_delta={self.hue_delta}, ' + repr_str += f'saturation_delta={self.saturation_delta}, ' + repr_str += f'value_delta={self.value_delta})' + return repr_str + + +# TODO: Check if it can be merged with mmdet.YOLOXHSVRandomAug +@TRANSFORMS.register_module() +class RandomFlip(MMDET_RandomFlip): + + @autocast_box_type() + def _flip(self, results: dict) -> None: + """Flip images, bounding boxes, and semantic segmentation map.""" + # flip image + results['img'] = mmcv.imflip( + results['img'], direction=results['flip_direction']) + + img_shape = results['img'].shape[:2] + + # flip bboxes + if results.get('gt_bboxes', None) is not None: + results['gt_bboxes'].flip_(img_shape, results['flip_direction']) + + # flip keypoints + if results.get('gt_keypoints', None) is not None: + results['gt_keypoints'].flip_(img_shape, results['flip_direction']) + + # flip masks + if results.get('gt_masks', None) is not None: + results['gt_masks'] = results['gt_masks'].flip( + results['flip_direction']) + + # flip segs + if results.get('gt_seg_map', None) is not None: + results['gt_seg_map'] = mmcv.imflip( + results['gt_seg_map'], direction=results['flip_direction']) + + # record 
homography matrix for flip
+        self._record_homography_matrix(results)
+
+
+@TRANSFORMS.register_module()
+class Resize(MMDET_Resize):
+
+    def _resize_keypoints(self, results: dict) -> None:
+        """Resize keypoints with ``results['scale_factor']``."""
+        if results.get('gt_keypoints', None) is not None:
+            results['gt_keypoints'].rescale_(results['scale_factor'])
+            if self.clip_object_border:
+                results['gt_keypoints'].clip_(results['img_shape'])
+
+    @autocast_box_type()
+    def transform(self, results: dict) -> dict:
+        """Transform function to resize images, bounding boxes, keypoints and
+        the semantic segmentation map.
+
+        Args:
+            results (dict): Result dict from loading pipeline.
+        Returns:
+            dict: Resized results, where the 'img', 'gt_bboxes', 'gt_seg_map',
+            'scale', 'scale_factor', 'height', 'width' and 'keep_ratio' keys
+            are updated.
+        """
+        if self.scale:
+            results['scale'] = self.scale
+        else:
+            img_shape = results['img'].shape[:2]
+            results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)
+        self._resize_img(results)
+        self._resize_bboxes(results)
+        self._resize_keypoints(results)
+        self._resize_masks(results)
+        self._resize_seg(results)
+        self._record_homography_matrix(results)
+        return results
diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/utils.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..efa2ff5ef07d73e82c258474db7b0e49edc4825a
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/utils.py
@@ -0,0 +1,133 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List, Sequence
+
+import numpy as np
+import torch
+from mmengine.dataset import COLLATE_FUNCTIONS
+from mmengine.dist import get_dist_info
+
+from ..registry import TASK_UTILS
+
+
+@COLLATE_FUNCTIONS.register_module()
+def yolov5_collate(data_batch: Sequence,
+                   use_ms_training: bool = False) -> dict:
+    """Rewritten ``collate_fn`` for faster training speed.
+
+    Args:
+        data_batch (Sequence): Batch of data.
+        use_ms_training (bool): Whether to use multi-scale training.
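+
+    Examples:
+        >>> # Output layout sketch; ``data_batch`` comes from a dataloader
+        >>> # and the shapes below are illustrative.
+        >>> collated = yolov5_collate(data_batch)
+        >>> # ``collated['inputs']``: stacked images of shape (B, C, H, W),
+        >>> # or a list of per-image tensors when ``use_ms_training=True``.
+        >>> # ``collated['data_samples']['bboxes_labels']``: a (num_gt, 6)
+        >>> # tensor whose rows are [batch_idx, class_label, x1, y1, x2, y2].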
+    """
+    batch_imgs = []
+    batch_bboxes_labels = []
+    batch_masks = []
+    batch_keypoints = []
+    batch_keypoints_visible = []
+    for i in range(len(data_batch)):
+        datasamples = data_batch[i]['data_samples']
+        inputs = data_batch[i]['inputs']
+        batch_imgs.append(inputs)
+
+        gt_bboxes = datasamples.gt_instances.bboxes.tensor
+        gt_labels = datasamples.gt_instances.labels
+        if 'masks' in datasamples.gt_instances:
+            masks = datasamples.gt_instances.masks
+            batch_masks.append(masks)
+        if 'gt_panoptic_seg' in datasamples:
+            batch_masks.append(datasamples.gt_panoptic_seg.pan_seg)
+        if 'keypoints' in datasamples.gt_instances:
+            keypoints = datasamples.gt_instances.keypoints
+            keypoints_visible = datasamples.gt_instances.keypoints_visible
+            batch_keypoints.append(keypoints)
+            batch_keypoints_visible.append(keypoints_visible)
+
+        batch_idx = gt_labels.new_full((len(gt_labels), 1), i)
+        bboxes_labels = torch.cat((batch_idx, gt_labels[:, None], gt_bboxes),
+                                  dim=1)
+        batch_bboxes_labels.append(bboxes_labels)
+    collated_results = {
+        'data_samples': {
+            'bboxes_labels': torch.cat(batch_bboxes_labels, 0)
+        }
+    }
+    if len(batch_masks) > 0:
+        collated_results['data_samples']['masks'] = torch.cat(batch_masks, 0)
+
+    if len(batch_keypoints) > 0:
+        collated_results['data_samples']['keypoints'] = torch.cat(
+            batch_keypoints, 0)
+        collated_results['data_samples']['keypoints_visible'] = torch.cat(
+            batch_keypoints_visible, 0)
+
+    if use_ms_training:
+        collated_results['inputs'] = batch_imgs
+    else:
+        collated_results['inputs'] = torch.stack(batch_imgs, 0)
+    return collated_results
+
+
+@TASK_UTILS.register_module()
+class BatchShapePolicy:
+    """``BatchShapePolicy`` is only used in the testing phase; it reduces the
+    number of padded pixels during batch inference.
+
+    Args:
+        batch_size (int): Single GPU batch size during batch inference.
+            Defaults to 32.
+        img_size (int): Expected output image size. Defaults to 640.
+        size_divisor (int): The returned batch shapes are padded to a
+            multiple of ``size_divisor``. Defaults to 32.
+        extra_pad_ratio (float): Extra pad ratio. Defaults to 0.5.
+    """
+
+    def __init__(self,
+                 batch_size: int = 32,
+                 img_size: int = 640,
+                 size_divisor: int = 32,
+                 extra_pad_ratio: float = 0.5):
+        self.img_size = img_size
+        self.size_divisor = size_divisor
+        self.extra_pad_ratio = extra_pad_ratio
+        _, world_size = get_dist_info()
+        # During multi-GPU testing, the batch size should be multiplied by
+        # the world size so that the number of batches can be calculated
+        # correctly. The batch index affects the calculation of batch shape.
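+        # For example (illustrative numbers): with ``batch_size=32`` on
+        # 8 GPUs, batch shapes are computed over groups of 256 images.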
+ self.batch_size = batch_size * world_size + + def __call__(self, data_list: List[dict]) -> List[dict]: + image_shapes = [] + for data_info in data_list: + image_shapes.append((data_info['width'], data_info['height'])) + + image_shapes = np.array(image_shapes, dtype=np.float64) + + n = len(image_shapes) # number of images + batch_index = np.floor(np.arange(n) / self.batch_size).astype( + np.int64) # batch index + number_of_batches = batch_index[-1] + 1 # number of batches + + aspect_ratio = image_shapes[:, 1] / image_shapes[:, 0] # aspect ratio + irect = aspect_ratio.argsort() + + data_list = [data_list[i] for i in irect] + + aspect_ratio = aspect_ratio[irect] + # Set training image shapes + shapes = [[1, 1]] * number_of_batches + for i in range(number_of_batches): + aspect_ratio_index = aspect_ratio[batch_index == i] + min_index, max_index = aspect_ratio_index.min( + ), aspect_ratio_index.max() + if max_index < 1: + shapes[i] = [max_index, 1] + elif min_index > 1: + shapes[i] = [1, 1 / min_index] + + batch_shapes = np.ceil( + np.array(shapes) * self.img_size / self.size_divisor + + self.extra_pad_ratio).astype(np.int64) * self.size_divisor + + for i, data_info in enumerate(data_list): + data_info['batch_shape'] = batch_shapes[batch_index[i]] + + return data_list diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_coco.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..55bc899abfcceebfdadf7549e56336725d891dcb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_coco.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any, Optional + +from mmdet.datasets import BaseDetDataset, CocoDataset + +from ..registry import DATASETS, TASK_UTILS + + +class BatchShapePolicyDataset(BaseDetDataset): + """Dataset with the batch shape policy that makes paddings with least + pixels during batch inference process, which does not require the image + scales of all batches to be the same throughout validation.""" + + def __init__(self, + *args, + batch_shapes_cfg: Optional[dict] = None, + **kwargs): + self.batch_shapes_cfg = batch_shapes_cfg + super().__init__(*args, **kwargs) + + def full_init(self): + """rewrite full_init() to be compatible with serialize_data in + BatchShapePolicy.""" + if self._fully_initialized: + return + # load data information + self.data_list = self.load_data_list() + + # batch_shapes_cfg + if self.batch_shapes_cfg: + batch_shapes_policy = TASK_UTILS.build(self.batch_shapes_cfg) + self.data_list = batch_shapes_policy(self.data_list) + del batch_shapes_policy + + # filter illegal data, such as data that has no annotations. + self.data_list = self.filter_data() + # Get subset data according to indices. + if self._indices is not None: + self.data_list = self._get_unserialized_subset(self._indices) + + # serialize data_list + if self.serialize_data: + self.data_bytes, self.data_address = self._serialize_data() + + self._fully_initialized = True + + def prepare_data(self, idx: int) -> Any: + """Pass the dataset to the pipeline during training to support mixed + data augmentation, such as Mosaic and MixUp.""" + if self.test_mode is False: + data_info = self.get_data_info(idx) + data_info['dataset'] = self + return self.pipeline(data_info) + else: + return super().prepare_data(idx) + + +@DATASETS.register_module() +class YOLOv5CocoDataset(BatchShapePolicyDataset, CocoDataset): + """Dataset for YOLOv5 COCO Dataset. 
+ + We only add `BatchShapePolicy` function compared with CocoDataset. See + `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + pass diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_crowdhuman.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_crowdhuman.py new file mode 100644 index 0000000000000000000000000000000000000000..486a8324fb4c7d8a34bf885f1818d2e6f974f6e7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_crowdhuman.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.datasets import CrowdHumanDataset + +from ..registry import DATASETS +from .yolov5_coco import BatchShapePolicyDataset + + +@DATASETS.register_module() +class YOLOv5CrowdHumanDataset(BatchShapePolicyDataset, CrowdHumanDataset): + """Dataset for YOLOv5 CrowdHuman Dataset. + + We only add `BatchShapePolicy` function compared with CrowdHumanDataset. + See `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + pass diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_dota.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_dota.py new file mode 100644 index 0000000000000000000000000000000000000000..a9647981333ed725a568a293279873ab9e20db47 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_dota.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset +from ..registry import DATASETS + +try: + from mmrotate.datasets import DOTADataset + MMROTATE_AVAILABLE = True +except ImportError: + from mmengine.dataset import BaseDataset + DOTADataset = BaseDataset + MMROTATE_AVAILABLE = False + + +@DATASETS.register_module() +class YOLOv5DOTADataset(BatchShapePolicyDataset, DOTADataset): + """Dataset for YOLOv5 DOTA Dataset. + + We only add `BatchShapePolicy` function compared with DOTADataset. See + `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + + def __init__(self, *args, **kwargs): + if not MMROTATE_AVAILABLE: + raise ImportError( + 'Please run "mim install -r requirements/mmrotate.txt" ' + 'to install mmrotate first for rotated detection.') + + super().__init__(*args, **kwargs) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_voc.py b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..5be764f1db3097645ae1be387e45cafb1b460731 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/datasets/yolov5_voc.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdet.datasets import VOCDataset + +from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset +from ..registry import DATASETS + + +@DATASETS.register_module() +class YOLOv5VOCDataset(BatchShapePolicyDataset, VOCDataset): + """Dataset for YOLOv5 VOC Dataset. + + We only add `BatchShapePolicy` function compared with VOCDataset. See + `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + pass diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4904a9058b41526d9719994ed718ae58336d290e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
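+# The imports below expose the MMYOLO deploy codebase and its deploy-time
+# model rewriters (see ``object_detection.py`` and ``models/``) to MMDeploy.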
+from mmdeploy.codebase.base import MMCodebase + +from .models import * # noqa: F401,F403 +from .object_detection import MMYOLO, YOLOObjectDetection + +__all__ = ['MMCodebase', 'MMYOLO', 'YOLOObjectDetection'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4b999a0161543d6a9d2ab56d797af740dc7261e4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from . import dense_heads # noqa: F401,F403 diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc423af3ec374cabe2b9f46d2fe4f4dc9755b8e3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from . import yolov5_head # noqa: F401,F403 + +__all__ = ['yolov5_head'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/yolov5_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/yolov5_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ac996ba41336243ef091e3e952430382be9ff978 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/dense_heads/yolov5_head.py @@ -0,0 +1,189 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from functools import partial +from typing import List, Optional, Tuple + +import torch +from mmdeploy.codebase.mmdet import get_post_processing_params +from mmdeploy.codebase.mmdet.models.layers import multiclass_nms +from mmdeploy.core import FUNCTION_REWRITER +from mmengine.config import ConfigDict +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.deploy.models.layers import efficient_nms +from mmyolo.models.dense_heads import YOLOv5Head + + +def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: int) -> Tensor: + """Decode YOLOv5 bounding boxes. + + Args: + priors (Tensor): Prior boxes in center-offset form. + bbox_preds (Tensor): Predicted bounding boxes. + stride (int): Stride of the feature map. + + Returns: + Tensor: Decoded bounding boxes. + """ + bbox_preds = bbox_preds.sigmoid() + + x_center = (priors[..., 0] + priors[..., 2]) * 0.5 + y_center = (priors[..., 1] + priors[..., 3]) * 0.5 + w = priors[..., 2] - priors[..., 0] + h = priors[..., 3] - priors[..., 1] + + x_center_pred = (bbox_preds[..., 0] - 0.5) * 2 * stride + x_center + y_center_pred = (bbox_preds[..., 1] - 0.5) * 2 * stride + y_center + w_pred = (bbox_preds[..., 2] * 2)**2 * w + h_pred = (bbox_preds[..., 3] * 2)**2 * h + + decoded_bboxes = torch.stack( + [x_center_pred, y_center_pred, w_pred, h_pred], dim=-1) + + return decoded_bboxes + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmyolo.models.dense_heads.yolov5_head.' 
+ 'YOLOv5Head.predict_by_feat') +def yolov5_head__predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = False, + with_nms: bool = True) -> Tuple[InstanceData]: + """Transform a batch of output features extracted by the head into + bbox results. + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + Returns: + tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, + where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch + size and the score between 0 and 1. The shape of the second + tensor in the tuple is (N, num_box), and each element + represents the class label of the corresponding box. + """ + ctx = FUNCTION_REWRITER.get_context() + detector_type = type(self) + deploy_cfg = ctx.cfg + use_efficientnms = deploy_cfg.get('use_efficientnms', False) + dtype = cls_scores[0].dtype + device = cls_scores[0].device + bbox_decoder = self.bbox_coder.decode + nms_func = multiclass_nms + if use_efficientnms: + if detector_type is YOLOv5Head: + nms_func = partial(efficient_nms, box_coding=0) + bbox_decoder = yolov5_bbox_decoder + else: + nms_func = efficient_nms + + assert len(cls_scores) == len(bbox_preds) + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + num_imgs = cls_scores[0].shape[0] + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, dtype=dtype, device=device) + + flatten_priors = torch.cat(mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size[0] * featmap_size[1] * self.num_base_priors, ), + stride) + for featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes) + for cls_score in cls_scores + ] + cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + + if objectnesses is not None: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1)) + + scores = cls_scores + + bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds, + flatten_stride) + + if not with_nms: + return bboxes, scores + + post_params = get_post_processing_params(deploy_cfg) + 
max_output_boxes_per_class = post_params.max_output_boxes_per_class + iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) + score_threshold = cfg.get('score_thr', post_params.score_threshold) + pre_top_k = post_params.pre_top_k + keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) + + return nms_func(bboxes, scores, max_output_boxes_per_class, iou_threshold, + score_threshold, pre_top_k, keep_top_k) + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmyolo.models.dense_heads.yolov5_head.' + 'YOLOv5Head.predict', + backend='rknn') +def yolov5_head__predict__rknn(self, x: Tuple[Tensor], *args, + **kwargs) -> Tuple[Tensor, Tensor, Tensor]: + """Perform forward propagation of the detection head and predict detection + results on the features of the upstream network. + + Args: + x (tuple[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + """ + outs = self(x) + return outs + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmyolo.models.dense_heads.yolov5_head.' + 'YOLOv5HeadModule.forward', + backend='rknn') +def yolov5_head_module__forward__rknn( + self, x: Tensor, *args, **kwargs) -> Tuple[Tensor, Tensor, Tensor]: + """Forward feature of a single scale level.""" + out = [] + for i, feat in enumerate(x): + out.append(self.convs_pred[i](feat)) + return out diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6017cf83425b640eb788a8abf6b253f29d759afb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox_nms import efficient_nms + +__all__ = ['efficient_nms'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/bbox_nms.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/bbox_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..4db81c0227a36e0315855082dcd8125e1f9be70a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/models/layers/bbox_nms.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdeploy.core import mark +from torch import Tensor + + +def _efficient_nms( + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: int = 1000, + iou_threshold: float = 0.5, + score_threshold: float = 0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + """Wrapper for `efficient_nms` with TensorRT. + + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5. + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (int): Number of top K boxes to keep before nms. + Defaults to -1. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + box_coding (int): Bounding boxes format for nms. + Defaults to 0 means [x, y, w, h]. + Set to 1 means [x1, y1 ,x2, y2]. + + Returns: + tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] + and `labels` of shape [N, num_det]. 
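+
+    Examples:
+        >>> # Shape sketch (illustrative numbers): for a batch of 2 images
+        >>> # with 1000 candidate boxes over 80 classes,
+        >>> #   boxes: (2, 1000, 4), scores: (2, 1000, 80)
+        >>> #   -> dets: (2, keep_top_k, 5), labels: (2, keep_top_k)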
+ """ + boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2) + _, det_boxes, det_scores, labels = TRTEfficientNMSop.apply( + boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0, + score_threshold) + dets = torch.cat([det_boxes, det_scores.unsqueeze(2)], -1) + + # retain shape info + batch_size = boxes.size(0) + + dets_shape = dets.shape + label_shape = labels.shape + dets = dets.reshape([batch_size, *dets_shape[1:]]) + labels = labels.reshape([batch_size, *label_shape[1:]]) + return dets, labels + + +@mark('efficient_nms', inputs=['boxes', 'scores'], outputs=['dets', 'labels']) +def efficient_nms(*args, **kwargs): + """Wrapper function for `_efficient_nms`.""" + return _efficient_nms(*args, **kwargs) + + +class TRTEfficientNMSop(torch.autograd.Function): + """Efficient NMS op for TensorRT.""" + + @staticmethod + def forward( + ctx, + boxes, + scores, + background_class=-1, + box_coding=0, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version='1', + score_activation=0, + score_threshold=0.25, + ): + """Forward function of TRTEfficientNMSop.""" + batch_size, num_boxes, num_classes = scores.shape + num_det = torch.randint( + 0, max_output_boxes, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, max_output_boxes, 4) + det_scores = torch.randn(batch_size, max_output_boxes) + det_classes = torch.randint( + 0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic(g, + boxes, + scores, + background_class=-1, + box_coding=0, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version='1', + score_activation=0, + score_threshold=0.25): + """Symbolic function of TRTEfficientNMSop.""" + out = g.op( + 'TRT::EfficientNMS_TRT', + boxes, + scores, + background_class_i=background_class, + box_coding_i=box_coding, + iou_threshold_f=iou_threshold, + max_output_boxes_i=max_output_boxes, + plugin_version_s=plugin_version, + score_activation_i=score_activation, + score_threshold_f=score_threshold, + outputs=4) + nums, boxes, scores, classes = out + return nums, boxes, scores, classes diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/object_detection.py b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..7efdfcfb7a46c8bc6b90e76bd06d9065410e55f0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/deploy/object_detection.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Callable, Dict, Optional + +import torch +from mmdeploy.codebase.base import CODEBASE, MMCodebase +from mmdeploy.codebase.mmdet.deploy import ObjectDetection +from mmdeploy.utils import Codebase, Task +from mmengine import Config +from mmengine.registry import Registry + +MMYOLO_TASK = Registry('mmyolo_tasks') + + +@CODEBASE.register_module(Codebase.MMYOLO.value) +class MMYOLO(MMCodebase): + """MMYOLO codebase class.""" + + task_registry = MMYOLO_TASK + + @classmethod + def register_deploy_modules(cls): + """register all rewriters for mmdet.""" + import mmdeploy.codebase.mmdet.models # noqa: F401 + import mmdeploy.codebase.mmdet.ops # noqa: F401 + import mmdeploy.codebase.mmdet.structures # noqa: F401 + + @classmethod + def register_all_modules(cls): + """register all modules.""" + from mmdet.utils.setup_env import \ + register_all_modules as register_all_modules_mmdet + + from mmyolo.utils.setup_env import \ + register_all_modules as register_all_modules_mmyolo + + cls.register_deploy_modules() + register_all_modules_mmyolo(True) + register_all_modules_mmdet(False) + + +def _get_dataset_metainfo(model_cfg: Config): + """Get metainfo of dataset. + + Args: + model_cfg Config: Input model Config object. + + Returns: + list[str]: A list of string specifying names of different class. + """ + from mmyolo import datasets # noqa + from mmyolo.registry import DATASETS + + module_dict = DATASETS.module_dict + for dataloader_name in [ + 'test_dataloader', 'val_dataloader', 'train_dataloader' + ]: + if dataloader_name not in model_cfg: + continue + dataloader_cfg = model_cfg[dataloader_name] + dataset_cfg = dataloader_cfg.dataset + dataset_cls = module_dict.get(dataset_cfg.type, None) + if dataset_cls is None: + continue + if hasattr(dataset_cls, '_load_metainfo') and isinstance( + dataset_cls._load_metainfo, Callable): + meta = dataset_cls._load_metainfo( + dataset_cfg.get('metainfo', None)) + if meta is not None: + return meta + if hasattr(dataset_cls, 'METAINFO'): + return dataset_cls.METAINFO + + return None + + +@MMYOLO_TASK.register_module(Task.OBJECT_DETECTION.value) +class YOLOObjectDetection(ObjectDetection): + """YOLO Object Detection task.""" + + def get_visualizer(self, name: str, save_dir: str): + """Get visualizer. + + Args: + name (str): Name of visualizer. + save_dir (str): Directory to save visualization results. + + Returns: + Visualizer: A visualizer instance. + """ + from mmdet.visualization import DetLocalVisualizer # noqa: F401,F403 + metainfo = _get_dataset_metainfo(self.model_cfg) + visualizer = super().get_visualizer(name, save_dir) + if metainfo is not None: + visualizer.dataset_meta = metainfo + return visualizer + + def build_pytorch_model(self, + model_checkpoint: Optional[str] = None, + cfg_options: Optional[Dict] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + cfg_options (dict): Optional config key-pair parameters. + Returns: + nn.Module: An initialized torch model generated by other OpenMMLab + codebases. 
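+
+        Examples:
+            >>> # Illustrative sketch; ``model_cfg``/``deploy_cfg`` are
+            >>> # mmengine Config objects and the checkpoint path is a
+            >>> # placeholder.
+            >>> from mmdeploy.apis.utils import build_task_processor
+            >>> task = build_task_processor(model_cfg, deploy_cfg, 'cuda:0')
+            >>> model = task.build_pytorch_model('checkpoint.pth')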
+ """ + from copy import deepcopy + + from mmengine.model import revert_sync_batchnorm + from mmengine.registry import MODELS + + from mmyolo.utils import switch_to_deploy + + model = deepcopy(self.model_cfg.model) + preprocess_cfg = deepcopy(self.model_cfg.get('preprocess_cfg', {})) + preprocess_cfg.update( + deepcopy(self.model_cfg.get('data_preprocessor', {}))) + model.setdefault('data_preprocessor', preprocess_cfg) + model = MODELS.build(model) + if model_checkpoint is not None: + from mmengine.runner.checkpoint import load_checkpoint + load_checkpoint(model, model_checkpoint, map_location=self.device) + + model = revert_sync_batchnorm(model) + switch_to_deploy(model) + model = model.to(self.device) + model.eval() + return model diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2e0a126c09797b327f7309d6e980245b7e44773 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hooks import * # noqa: F401,F403 +from .optimizers import * # noqa: F401,F403 diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8deebc8827da5b9a3f8c92a2fffe70e42d0bfa --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ppyoloe_param_scheduler_hook import PPYOLOEParamSchedulerHook +from .switch_to_deploy_hook import SwitchToDeployHook +from .yolov5_param_scheduler_hook import YOLOv5ParamSchedulerHook +from .yolox_mode_switch_hook import YOLOXModeSwitchHook + +__all__ = [ + 'YOLOv5ParamSchedulerHook', 'YOLOXModeSwitchHook', 'SwitchToDeployHook', + 'PPYOLOEParamSchedulerHook' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/ppyoloe_param_scheduler_hook.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/ppyoloe_param_scheduler_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..26dfe6ef2d5cf590ea381efb3e42cdc1c5492361 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/ppyoloe_param_scheduler_hook.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +from mmengine.hooks import ParamSchedulerHook +from mmengine.runner import Runner + +from mmyolo.registry import HOOKS + + +@HOOKS.register_module() +class PPYOLOEParamSchedulerHook(ParamSchedulerHook): + """A hook to update learning rate and momentum in optimizer of PPYOLOE. We + use this hook to implement adaptive computation for `warmup_total_iters`, + which is not possible with the built-in ParamScheduler in mmyolo. + + Args: + warmup_min_iter (int): Minimum warmup iters. Defaults to 1000. + start_factor (float): The number we multiply learning rate in the + first epoch. The multiplication factor changes towards end_factor + in the following epochs. Defaults to 0. + warmup_epochs (int): Epochs for warmup. Defaults to 5. + min_lr_ratio (float): Minimum learning rate ratio. + total_epochs (int): In PPYOLOE, `total_epochs` is set to + training_epochs x 1.2. Defaults to 360. 
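+
+    Examples:
+        >>> # Illustrative config sketch (the numbers are assumptions):
+        >>> custom_hooks = [
+        ...     dict(type='PPYOLOEParamSchedulerHook',
+        ...          warmup_min_iter=1000,
+        ...          start_factor=0.,
+        ...          warmup_epochs=5,
+        ...          min_lr_ratio=0.0,
+        ...          total_epochs=96)
+        ... ]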
+ """ + priority = 9 + + def __init__(self, + warmup_min_iter: int = 1000, + start_factor: float = 0., + warmup_epochs: int = 5, + min_lr_ratio: float = 0.0, + total_epochs: int = 360): + + self.warmup_min_iter = warmup_min_iter + self.start_factor = start_factor + self.warmup_epochs = warmup_epochs + self.min_lr_ratio = min_lr_ratio + self.total_epochs = total_epochs + + self._warmup_end = False + self._base_lr = None + + def before_train(self, runner: Runner): + """Operations before train. + + Args: + runner (Runner): The runner of the training process. + """ + optimizer = runner.optim_wrapper.optimizer + for group in optimizer.param_groups: + # If the param is never be scheduled, record the current value + # as the initial value. + group.setdefault('initial_lr', group['lr']) + + self._base_lr = [ + group['initial_lr'] for group in optimizer.param_groups + ] + self._min_lr = [i * self.min_lr_ratio for i in self._base_lr] + + def before_train_iter(self, + runner: Runner, + batch_idx: int, + data_batch: Optional[dict] = None): + """Operations before each training iteration. + + Args: + runner (Runner): The runner of the training process. + batch_idx (int): The index of the current batch in the train loop. + data_batch (dict or tuple or list, optional): Data from dataloader. + """ + cur_iters = runner.iter + optimizer = runner.optim_wrapper.optimizer + dataloader_len = len(runner.train_dataloader) + + # The minimum warmup is self.warmup_min_iter + warmup_total_iters = max( + round(self.warmup_epochs * dataloader_len), self.warmup_min_iter) + + if cur_iters <= warmup_total_iters: + # warm up + alpha = cur_iters / warmup_total_iters + factor = self.start_factor * (1 - alpha) + alpha + + for group_idx, param in enumerate(optimizer.param_groups): + param['lr'] = self._base_lr[group_idx] * factor + else: + for group_idx, param in enumerate(optimizer.param_groups): + total_iters = self.total_epochs * dataloader_len + lr = self._min_lr[group_idx] + ( + self._base_lr[group_idx] - + self._min_lr[group_idx]) * 0.5 * ( + math.cos((cur_iters - warmup_total_iters) * math.pi / + (total_iters - warmup_total_iters)) + 1.0) + param['lr'] = lr diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/switch_to_deploy_hook.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/switch_to_deploy_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..28ac345f40c44c974fb33b7bf9756a61fcabf820 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/switch_to_deploy_hook.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmengine.hooks import Hook +from mmengine.runner import Runner + +from mmyolo.registry import HOOKS +from mmyolo.utils import switch_to_deploy + + +@HOOKS.register_module() +class SwitchToDeployHook(Hook): + """Switch to deploy mode before testing. + + This hook converts the multi-channel structure of the training network + (high performance) to the one-way structure of the testing network (fast + speed and memory saving). 
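+
+    Examples:
+        >>> # Illustrative config sketch:
+        >>> custom_hooks = [dict(type='SwitchToDeployHook')]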
+ """ + + def before_test_epoch(self, runner: Runner): + """Switch to deploy mode before testing.""" + switch_to_deploy(runner.model) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolov5_param_scheduler_hook.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolov5_param_scheduler_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..777bb49d7abd7fc37385370546d05e70c274b3b7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolov5_param_scheduler_hook.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +import numpy as np +from mmengine.hooks import ParamSchedulerHook +from mmengine.runner import Runner + +from mmyolo.registry import HOOKS + + +def linear_fn(lr_factor: float, max_epochs: int): + """Generate linear function.""" + return lambda x: (1 - x / max_epochs) * (1.0 - lr_factor) + lr_factor + + +def cosine_fn(lr_factor: float, max_epochs: int): + """Generate cosine function.""" + return lambda x: ( + (1 - math.cos(x * math.pi / max_epochs)) / 2) * (lr_factor - 1) + 1 + + +@HOOKS.register_module() +class YOLOv5ParamSchedulerHook(ParamSchedulerHook): + """A hook to update learning rate and momentum in optimizer of YOLOv5.""" + priority = 9 + + scheduler_maps = {'linear': linear_fn, 'cosine': cosine_fn} + + def __init__(self, + scheduler_type: str = 'linear', + lr_factor: float = 0.01, + max_epochs: int = 300, + warmup_epochs: int = 3, + warmup_bias_lr: float = 0.1, + warmup_momentum: float = 0.8, + warmup_mim_iter: int = 1000, + **kwargs): + + assert scheduler_type in self.scheduler_maps + + self.warmup_epochs = warmup_epochs + self.warmup_bias_lr = warmup_bias_lr + self.warmup_momentum = warmup_momentum + self.warmup_mim_iter = warmup_mim_iter + + kwargs.update({'lr_factor': lr_factor, 'max_epochs': max_epochs}) + self.scheduler_fn = self.scheduler_maps[scheduler_type](**kwargs) + + self._warmup_end = False + self._base_lr = None + self._base_momentum = None + + def before_train(self, runner: Runner): + """Operations before train. + + Args: + runner (Runner): The runner of the training process. + """ + optimizer = runner.optim_wrapper.optimizer + for group in optimizer.param_groups: + # If the param is never be scheduled, record the current value + # as the initial value. + group.setdefault('initial_lr', group['lr']) + group.setdefault('initial_momentum', group.get('momentum', -1)) + + self._base_lr = [ + group['initial_lr'] for group in optimizer.param_groups + ] + self._base_momentum = [ + group['initial_momentum'] for group in optimizer.param_groups + ] + + def before_train_iter(self, + runner: Runner, + batch_idx: int, + data_batch: Optional[dict] = None): + """Operations before each training iteration. + + Args: + runner (Runner): The runner of the training process. + batch_idx (int): The index of the current batch in the train loop. + data_batch (dict or tuple or list, optional): Data from dataloader. 
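+
+        Examples:
+            >>> # Warmup sketch (illustrative numbers): with
+            >>> # ``warmup_total_iters=1000`` and ``warmup_bias_lr=0.1``, the
+            >>> # bias group's lr is linearly interpolated from 0.1 towards
+            >>> # its scheduled value over the first 1000 iterations, while
+            >>> # the other groups ramp up from 0.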
+ """ + cur_iters = runner.iter + cur_epoch = runner.epoch + optimizer = runner.optim_wrapper.optimizer + + # The minimum warmup is self.warmup_mim_iter + warmup_total_iters = max( + round(self.warmup_epochs * len(runner.train_dataloader)), + self.warmup_mim_iter) + + if cur_iters <= warmup_total_iters: + xp = [0, warmup_total_iters] + for group_idx, param in enumerate(optimizer.param_groups): + if group_idx == 2: + # bias learning rate will be handled specially + yp = [ + self.warmup_bias_lr, + self._base_lr[group_idx] * self.scheduler_fn(cur_epoch) + ] + else: + yp = [ + 0.0, + self._base_lr[group_idx] * self.scheduler_fn(cur_epoch) + ] + param['lr'] = np.interp(cur_iters, xp, yp) + + if 'momentum' in param: + param['momentum'] = np.interp( + cur_iters, xp, + [self.warmup_momentum, self._base_momentum[group_idx]]) + else: + self._warmup_end = True + + def after_train_epoch(self, runner: Runner): + """Operations after each training epoch. + + Args: + runner (Runner): The runner of the training process. + """ + if not self._warmup_end: + return + + cur_epoch = runner.epoch + optimizer = runner.optim_wrapper.optimizer + for group_idx, param in enumerate(optimizer.param_groups): + param['lr'] = self._base_lr[group_idx] * self.scheduler_fn( + cur_epoch) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolox_mode_switch_hook.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolox_mode_switch_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..27711768c3f89b26410ae1373bc920d0bfded603 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/hooks/yolox_mode_switch_hook.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Sequence + +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper +from mmengine.runner import Runner + +from mmyolo.registry import HOOKS + + +@HOOKS.register_module() +class YOLOXModeSwitchHook(Hook): + """Switch the mode of YOLOX during training. + + This hook turns off the mosaic and mixup data augmentation and switches + to use L1 loss in bbox_head. + + Args: + num_last_epochs (int): The number of latter epochs in the end of the + training to close the data augmentation and switch to L1 loss. + Defaults to 15. + """ + + def __init__(self, + num_last_epochs: int = 15, + new_train_pipeline: Sequence[dict] = None): + self.num_last_epochs = num_last_epochs + self.new_train_pipeline_cfg = new_train_pipeline + + def before_train_epoch(self, runner: Runner): + """Close mosaic and mixup augmentation and switches to use L1 loss.""" + epoch = runner.epoch + model = runner.model + if is_model_wrapper(model): + model = model.module + + if (epoch + 1) == runner.max_epochs - self.num_last_epochs: + runner.logger.info(f'New Pipeline: {self.new_train_pipeline_cfg}') + + train_dataloader_cfg = copy.deepcopy(runner.cfg.train_dataloader) + train_dataloader_cfg.dataset.pipeline = self.new_train_pipeline_cfg + # Note: Why rebuild the dataset? + # When build_dataloader will make a deep copy of the dataset, + # it will lead to potential risks, such as the global instance + # object FileClient data is disordered. + # This problem needs to be solved in the future. 
+ new_train_dataloader = Runner.build_dataloader( + train_dataloader_cfg) + runner.train_loop.dataloader = new_train_dataloader + + runner.logger.info('recreate the dataloader!') + runner.logger.info('Add additional bbox reg loss now!') + model.bbox_head.use_bbox_aux = True diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b598020d05db54cdc1d803d39ebd2c91026a6112 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .yolov5_optim_constructor import YOLOv5OptimizerConstructor +from .yolov7_optim_wrapper_constructor import YOLOv7OptimWrapperConstructor + +__all__ = ['YOLOv5OptimizerConstructor', 'YOLOv7OptimWrapperConstructor'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov5_optim_constructor.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov5_optim_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..5e5f42cb5c2c18962f989288b45011c742845c2f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov5_optim_constructor.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch.nn as nn +from mmengine.dist import get_world_size +from mmengine.logging import print_log +from mmengine.model import is_model_wrapper +from mmengine.optim import OptimWrapper + +from mmyolo.registry import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, + OPTIMIZERS) + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class YOLOv5OptimizerConstructor: + """YOLOv5 constructor for optimizers. + + It has the following functions: + + - divides the optimizer parameters into 3 groups: + Conv, Bias and BN + + - support `weight_decay` parameter adaption based on + `batch_size_per_gpu` + + Args: + optim_wrapper_cfg (dict): The config dict of the optimizer wrapper. + Positional fields are + + - ``type``: class name of the OptimizerWrapper + - ``optimizer``: The configuration of optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer wrapper type, + e.g., accumulative_counts, clip_grad, etc. + + The positional fields of ``optimizer`` are + + - `type`: class name of the optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + + paramwise_cfg (dict, optional): Parameter-wise options. Must include + `base_total_batch_size` if not None. If the total input batch + is smaller than `base_total_batch_size`, the `weight_decay` + parameter will be kept unchanged, otherwise linear scaling. 
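+
+    Note:
+        The effective total batch size is ``num_gpus * batch_size_per_gpu``.
+        With an assumed accumulation of
+        ``max(round(base_total_batch_size / total_batch_size), 1)``,
+        ``weight_decay`` is multiplied by
+        ``total_batch_size * accumulate / base_total_batch_size``
+        whenever this factor is not 1.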
+ + Example: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optim_wrapper_cfg = dict( + >>> dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01, + >>> momentum=0.9, weight_decay=0.0001, batch_size_per_gpu=16)) + >>> paramwise_cfg = dict(base_total_batch_size=64) + >>> optim_wrapper_builder = YOLOv5OptimizerConstructor( + >>> optim_wrapper_cfg, paramwise_cfg) + >>> optim_wrapper = optim_wrapper_builder(model) + """ + + def __init__(self, + optim_wrapper_cfg: dict, + paramwise_cfg: Optional[dict] = None): + if paramwise_cfg is None: + paramwise_cfg = {'base_total_batch_size': 64} + assert 'base_total_batch_size' in paramwise_cfg + + if not isinstance(optim_wrapper_cfg, dict): + raise TypeError('optimizer_cfg should be a dict', + f'but got {type(optim_wrapper_cfg)}') + assert 'optimizer' in optim_wrapper_cfg, ( + '`optim_wrapper_cfg` must contain "optimizer" config') + + self.optim_wrapper_cfg = optim_wrapper_cfg + self.optimizer_cfg = self.optim_wrapper_cfg.pop('optimizer') + self.base_total_batch_size = paramwise_cfg['base_total_batch_size'] + + def __call__(self, model: nn.Module) -> OptimWrapper: + if is_model_wrapper(model): + model = model.module + optimizer_cfg = self.optimizer_cfg.copy() + weight_decay = optimizer_cfg.pop('weight_decay', 0) + + if 'batch_size_per_gpu' in optimizer_cfg: + batch_size_per_gpu = optimizer_cfg.pop('batch_size_per_gpu') + # No scaling if total_batch_size is less than + # base_total_batch_size, otherwise linear scaling. + total_batch_size = get_world_size() * batch_size_per_gpu + accumulate = max( + round(self.base_total_batch_size / total_batch_size), 1) + scale_factor = total_batch_size * \ + accumulate / self.base_total_batch_size + + if scale_factor != 1: + weight_decay *= scale_factor + print_log(f'Scaled weight_decay to {weight_decay}', 'current') + + params_groups = [], [], [] + + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + params_groups[2].append(v.bias) + # Includes SyncBatchNorm + if isinstance(v, nn.modules.batchnorm._NormBase): + params_groups[1].append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + params_groups[0].append(v.weight) + + # Note: Make sure bias is in the last parameter group + optimizer_cfg['params'] = [] + # conv + optimizer_cfg['params'].append({ + 'params': params_groups[0], + 'weight_decay': weight_decay + }) + # bn + optimizer_cfg['params'].append({'params': params_groups[1]}) + # bias + optimizer_cfg['params'].append({'params': params_groups[2]}) + + print_log( + 'Optimizer groups: %g .bias, %g conv.weight, %g other' % + (len(params_groups[2]), len(params_groups[0]), len( + params_groups[1])), 'current') + del params_groups + + optimizer = OPTIMIZERS.build(optimizer_cfg) + optim_wrapper = OPTIM_WRAPPERS.build( + self.optim_wrapper_cfg, default_args=dict(optimizer=optimizer)) + return optim_wrapper diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov7_optim_wrapper_constructor.py b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov7_optim_wrapper_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..79ea8b69976760c0e45e35f8420d0cc69b13331a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/engine/optimizers/yolov7_optim_wrapper_constructor.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional + +import torch.nn as nn +from mmengine.dist import get_world_size +from mmengine.logging import print_log +from mmengine.model import is_model_wrapper +from mmengine.optim import OptimWrapper + +from mmyolo.models.dense_heads.yolov7_head import ImplicitA, ImplicitM +from mmyolo.registry import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, + OPTIMIZERS) + + +# TODO: Consider merging into YOLOv5OptimizerConstructor +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class YOLOv7OptimWrapperConstructor: + """YOLOv7 constructor for optimizer wrappers. + + It has the following functions: + + - divides the optimizer parameters into 3 groups: + Conv, Bias and BN/ImplicitA/ImplicitM + + - support `weight_decay` parameter adaption based on + `batch_size_per_gpu` + + Args: + optim_wrapper_cfg (dict): The config dict of the optimizer wrapper. + Positional fields are + + - ``type``: class name of the OptimizerWrapper + - ``optimizer``: The configuration of optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer wrapper type, + e.g., accumulative_counts, clip_grad, etc. + + The positional fields of ``optimizer`` are + + - `type`: class name of the optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + + paramwise_cfg (dict, optional): Parameter-wise options. Must include + `base_total_batch_size` if not None. If the total input batch + is smaller than `base_total_batch_size`, the `weight_decay` + parameter will be kept unchanged, otherwise linear scaling. + + Example: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optim_wrapper_cfg = dict( + >>> dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01, + >>> momentum=0.9, weight_decay=0.0001, batch_size_per_gpu=16)) + >>> paramwise_cfg = dict(base_total_batch_size=64) + >>> optim_wrapper_builder = YOLOv7OptimWrapperConstructor( + >>> optim_wrapper_cfg, paramwise_cfg) + >>> optim_wrapper = optim_wrapper_builder(model) + """ + + def __init__(self, + optim_wrapper_cfg: dict, + paramwise_cfg: Optional[dict] = None): + if paramwise_cfg is None: + paramwise_cfg = {'base_total_batch_size': 64} + assert 'base_total_batch_size' in paramwise_cfg + + if not isinstance(optim_wrapper_cfg, dict): + raise TypeError('optimizer_cfg should be a dict', + f'but got {type(optim_wrapper_cfg)}') + assert 'optimizer' in optim_wrapper_cfg, ( + '`optim_wrapper_cfg` must contain "optimizer" config') + + self.optim_wrapper_cfg = optim_wrapper_cfg + self.optimizer_cfg = self.optim_wrapper_cfg.pop('optimizer') + self.base_total_batch_size = paramwise_cfg['base_total_batch_size'] + + def __call__(self, model: nn.Module) -> OptimWrapper: + if is_model_wrapper(model): + model = model.module + optimizer_cfg = self.optimizer_cfg.copy() + weight_decay = optimizer_cfg.pop('weight_decay', 0) + + if 'batch_size_per_gpu' in optimizer_cfg: + batch_size_per_gpu = optimizer_cfg.pop('batch_size_per_gpu') + # No scaling if total_batch_size is less than + # base_total_batch_size, otherwise linear scaling. 
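+            # Worked example (assuming the default base_total_batch_size=64):
+            # 8 GPUs x batch_size_per_gpu=16 -> total_batch_size=128,
+            # accumulate = max(round(64 / 128), 1) = 1,
+            # scale_factor = 128 * 1 / 64 = 2, so weight_decay is doubled;
+            # 1 GPU x 16 -> accumulate = 4, scale_factor = 1, so
+            # weight_decay is left unchanged.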
+ total_batch_size = get_world_size() * batch_size_per_gpu + accumulate = max( + round(self.base_total_batch_size / total_batch_size), 1) + scale_factor = total_batch_size * \ + accumulate / self.base_total_batch_size + + if scale_factor != 1: + weight_decay *= scale_factor + print_log(f'Scaled weight_decay to {weight_decay}', 'current') + + params_groups = [], [], [] + for v in model.modules(): + # no decay + # Caution: Coupling with model + if isinstance(v, (ImplicitA, ImplicitM)): + params_groups[0].append(v.implicit) + elif isinstance(v, nn.modules.batchnorm._NormBase): + params_groups[0].append(v.weight) + # apply decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + params_groups[1].append(v.weight) # apply decay + + # biases, no decay + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + params_groups[2].append(v.bias) + + # Note: Make sure bias is in the last parameter group + optimizer_cfg['params'] = [] + # conv + optimizer_cfg['params'].append({ + 'params': params_groups[1], + 'weight_decay': weight_decay + }) + # bn ... + optimizer_cfg['params'].append({'params': params_groups[0]}) + # bias + optimizer_cfg['params'].append({'params': params_groups[2]}) + + print_log( + 'Optimizer groups: %g .bias, %g conv.weight, %g other' % + (len(params_groups[2]), len(params_groups[1]), len( + params_groups[0])), 'current') + del params_groups + + optimizer = OPTIMIZERS.build(optimizer_cfg) + optim_wrapper = OPTIM_WRAPPERS.build( + self.optim_wrapper_cfg, default_args=dict(optimizer=optimizer)) + return optim_wrapper diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51c37f0436f131dcd26b9a8115e58fe49d59207e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa: F401,F403 +from .data_preprocessors import * # noqa: F401,F403 +from .dense_heads import * # noqa: F401,F403 +from .detectors import * # noqa: F401,F403 +from .layers import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .plugins import * # noqa: F401,F403 +from .task_modules import * # noqa: F401,F403 diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48c8e28b1e7eb97e3f7cb064c75af0dc79b4cc8d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .base_backbone import BaseBackbone +from .csp_darknet import YOLOv5CSPDarknet, YOLOv8CSPDarknet, YOLOXCSPDarknet +from .csp_resnet import PPYOLOECSPResNet +from .cspnext import CSPNeXt +from .efficient_rep import YOLOv6CSPBep, YOLOv6EfficientRep +from .yolov7_backbone import YOLOv7Backbone + +__all__ = [ + 'YOLOv5CSPDarknet', 'BaseBackbone', 'YOLOv6EfficientRep', 'YOLOv6CSPBep', + 'YOLOXCSPDarknet', 'CSPNeXt', 'YOLOv7Backbone', 'PPYOLOECSPResNet', + 'YOLOv8CSPDarknet' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/base_backbone.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/base_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..730c7095eccf66b0d563fad96122454c98dff0ac --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/base_backbone.py @@ -0,0 +1,225 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import List, Sequence, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_plugin_layer +from mmdet.utils import ConfigType, OptMultiConfig +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class BaseBackbone(BaseModule, metaclass=ABCMeta): + """BaseBackbone backbone used in YOLO series. + + .. code:: text + + Backbone model structure diagram + +-----------+ + | input | + +-----------+ + v + +-----------+ + | stem | + | layer | + +-----------+ + v + +-----------+ + | stage | + | layer 1 | + +-----------+ + v + +-----------+ + | stage | + | layer 2 | + +-----------+ + v + ...... + v + +-----------+ + | stage | + | layer n | + +-----------+ + In P5 model, n=4 + In P6 model, n=5 + + Args: + arch_setting (list): Architecture of BaseBackbone. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels: Number of input image channels. Defaults to 3. + out_indices (Sequence[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to None. + act_cfg (dict): Config dict for activation layer. + Defaults to None. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
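+
+    Note:
+        ``forward`` runs the stem and the stage layers in order and returns,
+        as a tuple, the outputs of the layers whose index appears in
+        ``out_indices`` (the stem is index 0, ``stage1`` is index 1, and so
+        on).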
+ """ + + def __init__(self, + arch_setting: list, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Sequence[int] = (2, 3, 4), + frozen_stages: int = -1, + plugins: Union[dict, List[dict]] = None, + norm_cfg: ConfigType = None, + act_cfg: ConfigType = None, + norm_eval: bool = False, + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + self.num_stages = len(arch_setting) + self.arch_setting = arch_setting + + assert set(out_indices).issubset( + i for i in range(len(arch_setting) + 1)) + + if frozen_stages not in range(-1, len(arch_setting) + 1): + raise ValueError('"frozen_stages" must be in range(-1, ' + 'len(arch_setting) + 1). But received ' + f'{frozen_stages}') + + self.input_channels = input_channels + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.widen_factor = widen_factor + self.deepen_factor = deepen_factor + self.norm_eval = norm_eval + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.plugins = plugins + + self.stem = self.build_stem_layer() + self.layers = ['stem'] + + for idx, setting in enumerate(arch_setting): + stage = [] + stage += self.build_stage_layer(idx, setting) + if plugins is not None: + stage += self.make_stage_plugins(plugins, idx, setting) + self.add_module(f'stage{idx + 1}', nn.Sequential(*stage)) + self.layers.append(f'stage{idx + 1}') + + @abstractmethod + def build_stem_layer(self): + """Build a stem layer.""" + pass + + @abstractmethod + def build_stage_layer(self, stage_idx: int, setting: list): + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + pass + + def make_stage_plugins(self, plugins, stage_idx, setting): + """Make plugins for backbone ``stage_idx`` th stage. + + Currently we support to insert ``context_block``, + ``empirical_attention_block``, ``nonlocal_block``, ``dropout_block`` + into the backbone. + + + An example of plugins format could be: + + Examples: + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True)), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True)), + ... ] + >>> model = YOLOv5CSPDarknet() + >>> stage_plugins = model.make_stage_plugins(plugins, 0, setting) + >>> assert len(stage_plugins) == 1 + + Suppose ``stage_idx=0``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1 -> conv2 -> conv3 -> yyy + + Suppose ``stage_idx=1``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1 -> conv2 -> conv3 -> xxx -> yyy + + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. + stage_idx (int): Index of stage to build + If stages is missing, the plugin would be applied to all + stages. + setting (list): The architecture setting of a stage layer. 
+ + Returns: + list[nn.Module]: Plugins for current stage + """ + # TODO: It is not general enough to support any channel and needs + # to be refactored + in_channels = int(setting[1] * self.widen_factor) + plugin_layers = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + if stages is None or stages[stage_idx]: + name, layer = build_plugin_layer( + plugin['cfg'], in_channels=in_channels) + plugin_layers.append(layer) + return plugin_layers + + def _freeze_stages(self): + """Freeze the parameters of the specified stage so that they are no + longer updated.""" + if self.frozen_stages >= 0: + for i in range(self.frozen_stages + 1): + m = getattr(self, self.layers[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode: bool = True): + """Convert the model into training mode while keep normalization layer + frozen.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x: torch.Tensor) -> tuple: + """Forward batch_inputs from the data_preprocessor.""" + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_darknet.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_darknet.py new file mode 100644 index 0000000000000000000000000000000000000000..92bd69a5a9378a37ed8fb50c52dfba0de6879083 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_darknet.py @@ -0,0 +1,427 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmdet.models.backbones.csp_darknet import CSPLayer, Focus +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from ..layers import CSPLayerWithTwoConv, SPPFBottleneck +from ..utils import make_divisible, make_round +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class YOLOv5CSPDarknet(BaseBackbone): + """CSP-Darknet backbone used in YOLOv5. + Args: + arch (str): Architecture of CSP-Darknet, from {P5, P6}. + Defaults to P5. + plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels (int): Number of input image channels. Defaults to: 3. + out_indices (Tuple[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (Union[dict,list[dict]], optional): Initialization config + dict. Defaults to None. + Example: + >>> from mmyolo.models import YOLOv5CSPDarknet + >>> import torch + >>> model = YOLOv5CSPDarknet() + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 9, True, False], [512, 1024, 3, True, True]], + 'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 9, True, False], [512, 768, 3, True, False], + [768, 1024, 3, True, True]] + } + + def __init__(self, + arch: str = 'P5', + plugins: Union[dict, List[dict]] = None, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + init_cfg: OptMultiConfig = None): + super().__init__( + self.arch_settings[arch], + deepen_factor, + widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + return ConvModule( + self.input_channels, + make_divisible(self.arch_setting[0][0], self.widen_factor), + kernel_size=6, + stride=2, + padding=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + in_channels, out_channels, num_blocks, add_identity, use_spp = setting + + in_channels = make_divisible(in_channels, self.widen_factor) + out_channels = make_divisible(out_channels, self.widen_factor) + num_blocks = make_round(num_blocks, self.deepen_factor) + stage = [] + conv_layer = ConvModule( + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(conv_layer) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(csp_layer) + if use_spp: + spp = SPPFBottleneck( + out_channels, + out_channels, + kernel_sizes=5, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(spp) + return stage + + def init_weights(self): + """Initialize the parameters.""" + if self.init_cfg is None: + for m in self.modules(): + if isinstance(m, torch.nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + else: + super().init_weights() + + +@MODELS.register_module() +class YOLOv8CSPDarknet(BaseBackbone): + """CSP-Darknet backbone used in YOLOv8. + + Args: + arch (str): Architecture of CSP-Darknet, from {P5}. + Defaults to P5. + last_stage_out_channels (int): Final layer output channel. + Defaults to 1024. 
+ plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels (int): Number of input image channels. Defaults to: 3. + out_indices (Tuple[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (Union[dict,list[dict]], optional): Initialization config + dict. Defaults to None. + + Example: + >>> from mmyolo.models import YOLOv8CSPDarknet + >>> import torch + >>> model = YOLOv8CSPDarknet() + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + # the final out_channels will be set according to the param. + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 6, True, False], [512, None, 3, True, True]], + } + + def __init__(self, + arch: str = 'P5', + last_stage_out_channels: int = 1024, + plugins: Union[dict, List[dict]] = None, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + init_cfg: OptMultiConfig = None): + self.arch_settings[arch][-1][1] = last_stage_out_channels + super().__init__( + self.arch_settings[arch], + deepen_factor, + widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + return ConvModule( + self.input_channels, + make_divisible(self.arch_setting[0][0], self.widen_factor), + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. 
+ """ + in_channels, out_channels, num_blocks, add_identity, use_spp = setting + + in_channels = make_divisible(in_channels, self.widen_factor) + out_channels = make_divisible(out_channels, self.widen_factor) + num_blocks = make_round(num_blocks, self.deepen_factor) + stage = [] + conv_layer = ConvModule( + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(conv_layer) + csp_layer = CSPLayerWithTwoConv( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(csp_layer) + if use_spp: + spp = SPPFBottleneck( + out_channels, + out_channels, + kernel_sizes=5, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(spp) + return stage + + def init_weights(self): + """Initialize the parameters.""" + if self.init_cfg is None: + for m in self.modules(): + if isinstance(m, torch.nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + else: + super().init_weights() + + +@MODELS.register_module() +class YOLOXCSPDarknet(BaseBackbone): + """CSP-Darknet backbone used in YOLOX. + + Args: + arch (str): Architecture of CSP-Darknet, from {P5, P6}. + Defaults to P5. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels (int): Number of input image channels. Defaults to 3. + out_indices (Tuple[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP + layers. Defaults to (5, 9, 13). + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (Union[dict,list[dict]], optional): Initialization config + dict. Defaults to None. + Example: + >>> from mmyolo.models import YOLOXCSPDarknet + >>> import torch + >>> model = YOLOXCSPDarknet() + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... 
+ (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], + [256, 512, 9, True, False], [512, 1024, 3, False, True]], + } + + def __init__(self, + arch: str = 'P5', + plugins: Union[dict, List[dict]] = None, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + use_depthwise: bool = False, + spp_kernal_sizes: Tuple[int] = (5, 9, 13), + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + init_cfg: OptMultiConfig = None): + self.use_depthwise = use_depthwise + self.spp_kernal_sizes = spp_kernal_sizes + super().__init__(self.arch_settings[arch], deepen_factor, widen_factor, + input_channels, out_indices, frozen_stages, plugins, + norm_cfg, act_cfg, norm_eval, init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + return Focus( + 3, + make_divisible(64, self.widen_factor), + kernel_size=3, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + in_channels, out_channels, num_blocks, add_identity, use_spp = setting + + in_channels = make_divisible(in_channels, self.widen_factor) + out_channels = make_divisible(out_channels, self.widen_factor) + num_blocks = make_round(num_blocks, self.deepen_factor) + stage = [] + conv = DepthwiseSeparableConvModule \ + if self.use_depthwise else ConvModule + conv_layer = conv( + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(conv_layer) + if use_spp: + spp = SPPFBottleneck( + out_channels, + out_channels, + kernel_sizes=self.spp_kernal_sizes, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(spp) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(csp_layer) + return stage diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_resnet.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..a42ed489d8872913f4aacce08497c8e48fdace49 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/csp_resnet.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple, Union + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.models.backbones import BaseBackbone +from mmyolo.models.layers.yolo_bricks import CSPResLayer +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class PPYOLOECSPResNet(BaseBackbone): + """CSP-ResNet backbone used in PPYOLOE. + + Args: + arch (str): Architecture of CSPNeXt, from {P5, P6}. + Defaults to P5. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. 
+ out_indices (Sequence[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + arch_ovewrite (list): Overwrite default arch settings. + Defaults to None. + block_cfg (dict): Config dict for block. Defaults to + dict(type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True) + norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and + config norm layer. Defaults to dict(type='BN', momentum=0.1, + eps=1e-5). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + attention_cfg (dict): Config dict for `EffectiveSELayer`. + Defaults to dict(type='EffectiveSELayer', + act_cfg=dict(type='HSigmoid')). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`]): Initialization config dict. + use_large_stem (bool): Whether to use large stem layer. + Defaults to False. + """ + # From left to right: + # in_channels, out_channels, num_blocks + arch_settings = { + 'P5': [[64, 128, 3], [128, 256, 6], [256, 512, 6], [512, 1024, 3]] + } + + def __init__(self, + arch: str = 'P5', + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + plugins: Union[dict, List[dict]] = None, + arch_ovewrite: dict = None, + block_cfg: ConfigType = dict( + type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True), + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + attention_cfg: ConfigType = dict( + type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')), + norm_eval: bool = False, + init_cfg: OptMultiConfig = None, + use_large_stem: bool = False): + arch_setting = self.arch_settings[arch] + if arch_ovewrite: + arch_setting = arch_ovewrite + arch_setting = [[ + int(in_channels * widen_factor), + int(out_channels * widen_factor), + round(num_blocks * deepen_factor) + ] for in_channels, out_channels, num_blocks in arch_setting] + self.block_cfg = block_cfg + self.use_large_stem = use_large_stem + self.attention_cfg = attention_cfg + + super().__init__( + arch_setting, + deepen_factor, + widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + if self.use_large_stem: + stem = nn.Sequential( + ConvModule( + self.input_channels, + self.arch_setting[0][0] // 2, + 3, + stride=2, + padding=1, + act_cfg=self.act_cfg, + norm_cfg=self.norm_cfg), + ConvModule( + self.arch_setting[0][0] // 2, + self.arch_setting[0][0] // 2, + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + self.arch_setting[0][0] // 2, + self.arch_setting[0][0], + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + else: + stem = nn.Sequential( + ConvModule( + self.input_channels, + 
self.arch_setting[0][0] // 2, + 3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + self.arch_setting[0][0] // 2, + self.arch_setting[0][0], + 3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return stem + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + in_channels, out_channels, num_blocks = setting + + cspres_layer = CSPResLayer( + in_channels=in_channels, + out_channels=out_channels, + num_block=num_blocks, + block_cfg=self.block_cfg, + stride=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + attention_cfg=self.attention_cfg, + use_spp=False) + return [cspres_layer] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/cspnext.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/cspnext.py new file mode 100644 index 0000000000000000000000000000000000000000..adca9dd9d11baecefda90a99a4188e78c2ca8188 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/cspnext.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import List, Sequence, Union + +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmdet.models.backbones.csp_darknet import CSPLayer +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from ..layers import SPPFBottleneck +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class CSPNeXt(BaseBackbone): + """CSPNeXt backbone used in RTMDet. + + Args: + arch (str): Architecture of CSPNeXt, from {P5, P6}. + Defaults to P5. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin.Defaults to + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Defaults to 0.5. + arch_ovewrite (list): Overwrite default arch settings. + Defaults to None. + channel_attention (bool): Whether to add channel attention in each + stage. Defaults to True. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and + config norm layer. Defaults to dict(type='BN', requires_grad=True). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`]): Initialization config dict. 
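+
+    Example:
+        >>> from mmyolo.models import CSPNeXt
+        >>> import torch
+        >>> model = CSPNeXt()
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 416, 416)
+        >>> level_outputs = model(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        ...
+        (1, 256, 52, 52)
+        (1, 512, 26, 26)
+        (1, 1024, 13, 13)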
+ """ + # From left to right: + # in_channels, out_channels, num_blocks, add_identity, use_spp + arch_settings = { + 'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 6, True, False], [512, 1024, 3, False, True]], + 'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False], + [256, 512, 6, True, False], [512, 768, 3, True, False], + [768, 1024, 3, False, True]] + } + + def __init__( + self, + arch: str = 'P5', + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Sequence[int] = (2, 3, 4), + frozen_stages: int = -1, + plugins: Union[dict, List[dict]] = None, + use_depthwise: bool = False, + expand_ratio: float = 0.5, + arch_ovewrite: dict = None, + channel_attention: bool = True, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN'), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + init_cfg: OptMultiConfig = dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu') + ) -> None: + arch_setting = self.arch_settings[arch] + if arch_ovewrite: + arch_setting = arch_ovewrite + self.channel_attention = channel_attention + self.use_depthwise = use_depthwise + self.conv = DepthwiseSeparableConvModule \ + if use_depthwise else ConvModule + self.expand_ratio = expand_ratio + self.conv_cfg = conv_cfg + + super().__init__( + arch_setting, + deepen_factor, + widen_factor, + input_channels, + out_indices, + frozen_stages=frozen_stages, + plugins=plugins, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + stem = nn.Sequential( + ConvModule( + 3, + int(self.arch_setting[0][0] * self.widen_factor // 2), + 3, + padding=1, + stride=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + int(self.arch_setting[0][0] * self.widen_factor // 2), + int(self.arch_setting[0][0] * self.widen_factor // 2), + 3, + padding=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + int(self.arch_setting[0][0] * self.widen_factor // 2), + int(self.arch_setting[0][0] * self.widen_factor), + 3, + padding=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return stem + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. 
+ """ + in_channels, out_channels, num_blocks, add_identity, use_spp = setting + + in_channels = int(in_channels * self.widen_factor) + out_channels = int(out_channels * self.widen_factor) + num_blocks = max(round(num_blocks * self.deepen_factor), 1) + + stage = [] + conv_layer = self.conv( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(conv_layer) + if use_spp: + spp = SPPFBottleneck( + out_channels, + out_channels, + kernel_sizes=5, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(spp) + csp_layer = CSPLayer( + out_channels, + out_channels, + num_blocks=num_blocks, + add_identity=add_identity, + use_depthwise=self.use_depthwise, + use_cspnext_block=True, + expand_ratio=self.expand_ratio, + channel_attention=self.channel_attention, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(csp_layer) + return stage diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/efficient_rep.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/efficient_rep.py new file mode 100644 index 0000000000000000000000000000000000000000..32e455f06972af148fa56bba1c4178b0e2d540bd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/efficient_rep.py @@ -0,0 +1,305 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import List, Tuple, Union + +import torch +import torch.nn as nn +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.models.layers.yolo_bricks import CSPSPPFBottleneck, SPPFBottleneck +from mmyolo.registry import MODELS +from ..layers import BepC3StageBlock, RepStageBlock +from ..utils import make_round +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class YOLOv6EfficientRep(BaseBackbone): + """EfficientRep backbone used in YOLOv6. + Args: + arch (str): Architecture of BaseDarknet, from {P5, P6}. + Defaults to P5. + plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels (int): Number of input image channels. Defaults to 3. + out_indices (Tuple[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + init_cfg (Union[dict, list[dict]], optional): Initialization config + dict. Defaults to None. 
+ Example: + >>> from mmyolo.models import YOLOv6EfficientRep + >>> import torch + >>> model = YOLOv6EfficientRep() + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, use_spp + arch_settings = { + 'P5': [[64, 128, 6, False], [128, 256, 12, False], + [256, 512, 18, False], [512, 1024, 6, True]] + } + + def __init__(self, + arch: str = 'P5', + plugins: Union[dict, List[dict]] = None, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + use_cspsppf: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + norm_eval: bool = False, + block_cfg: ConfigType = dict(type='RepVGGBlock'), + init_cfg: OptMultiConfig = None): + self.block_cfg = block_cfg + self.use_cspsppf = use_cspsppf + super().__init__( + self.arch_settings[arch], + deepen_factor, + widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + + block_cfg = self.block_cfg.copy() + block_cfg.update( + dict( + in_channels=self.input_channels, + out_channels=int(self.arch_setting[0][0] * self.widen_factor), + kernel_size=3, + stride=2, + )) + return MODELS.build(block_cfg) + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + in_channels, out_channels, num_blocks, use_spp = setting + + in_channels = int(in_channels * self.widen_factor) + out_channels = int(out_channels * self.widen_factor) + num_blocks = make_round(num_blocks, self.deepen_factor) + + rep_stage_block = RepStageBlock( + in_channels=out_channels, + out_channels=out_channels, + num_blocks=num_blocks, + block_cfg=self.block_cfg, + ) + + block_cfg = self.block_cfg.copy() + block_cfg.update( + dict( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2)) + stage = [] + + ef_block = nn.Sequential(MODELS.build(block_cfg), rep_stage_block) + + stage.append(ef_block) + + if use_spp: + spp = SPPFBottleneck( + in_channels=out_channels, + out_channels=out_channels, + kernel_sizes=5, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.use_cspsppf: + spp = CSPSPPFBottleneck( + in_channels=out_channels, + out_channels=out_channels, + kernel_sizes=5, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + stage.append(spp) + return stage + + def init_weights(self): + if self.init_cfg is None: + """Initialize the parameters.""" + for m in self.modules(): + if isinstance(m, torch.nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + else: + super().init_weights() + + +@MODELS.register_module() +class YOLOv6CSPBep(YOLOv6EfficientRep): + """CSPBep backbone used in YOLOv6. + Args: + arch (str): Architecture of BaseDarknet, from {P5, P6}. + Defaults to P5. 
+ plugins (list[dict]): List of plugins for stages, each dict contains: + - cfg (dict, required): Cfg dict to build plugin. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + input_channels (int): Number of input image channels. Defaults to 3. + out_indices (Tuple[int]): Output from which stages. + Defaults to (2, 3, 4). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + block_act_cfg (dict): Config dict for activation layer used in each + stage. Defaults to dict(type='SiLU', inplace=True). + init_cfg (Union[dict, list[dict]], optional): Initialization config + dict. Defaults to None. + Example: + >>> from mmyolo.models import YOLOv6CSPBep + >>> import torch + >>> model = YOLOv6CSPBep() + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + # From left to right: + # in_channels, out_channels, num_blocks, use_spp + arch_settings = { + 'P5': [[64, 128, 6, False], [128, 256, 12, False], + [256, 512, 18, False], [512, 1024, 6, True]] + } + + def __init__(self, + arch: str = 'P5', + plugins: Union[dict, List[dict]] = None, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + hidden_ratio: float = 0.5, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + use_cspsppf: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + block_cfg: ConfigType = dict(type='ConvWrapper'), + init_cfg: OptMultiConfig = None): + self.hidden_ratio = hidden_ratio + self.use_cspsppf = use_cspsppf + super().__init__( + arch=arch, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + block_cfg=block_cfg, + init_cfg=init_cfg) + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. 
+        """
+        in_channels, out_channels, num_blocks, use_spp = setting
+        in_channels = int(in_channels * self.widen_factor)
+        out_channels = int(out_channels * self.widen_factor)
+        num_blocks = make_round(num_blocks, self.deepen_factor)
+
+        rep_stage_block = BepC3StageBlock(
+            in_channels=out_channels,
+            out_channels=out_channels,
+            num_blocks=num_blocks,
+            hidden_ratio=self.hidden_ratio,
+            block_cfg=self.block_cfg,
+            norm_cfg=self.norm_cfg,
+            act_cfg=self.act_cfg)
+        block_cfg = self.block_cfg.copy()
+        block_cfg.update(
+            dict(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=3,
+                stride=2))
+        stage = []
+
+        ef_block = nn.Sequential(MODELS.build(block_cfg), rep_stage_block)
+
+        stage.append(ef_block)
+
+        if use_spp:
+            spp = SPPFBottleneck(
+                in_channels=out_channels,
+                out_channels=out_channels,
+                kernel_sizes=5,
+                norm_cfg=self.norm_cfg,
+                act_cfg=self.act_cfg)
+            if self.use_cspsppf:
+                spp = CSPSPPFBottleneck(
+                    in_channels=out_channels,
+                    out_channels=out_channels,
+                    kernel_sizes=5,
+                    norm_cfg=self.norm_cfg,
+                    act_cfg=self.act_cfg)
+            stage.append(spp)
+        return stage
diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/yolov7_backbone.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/yolov7_backbone.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb9a5eed85ca1ee6884f7348ef3745a9ceaba032
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/backbones/yolov7_backbone.py
@@ -0,0 +1,285 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List, Optional, Tuple, Union
+
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+from mmdet.models.backbones.csp_darknet import Focus
+from mmdet.utils import ConfigType, OptMultiConfig
+
+from mmyolo.registry import MODELS
+from ..layers import MaxPoolAndStrideConvBlock
+from .base_backbone import BaseBackbone
+
+
+@MODELS.register_module()
+class YOLOv7Backbone(BaseBackbone):
+    """Backbone used in YOLOv7.
+
+    Args:
+        arch (str): Architecture of YOLOv7. Defaults to L.
+        deepen_factor (float): Depth multiplier, multiply number of
+            blocks in CSP layer by this amount. Defaults to 1.0.
+        widen_factor (float): Width multiplier, multiply number of
+            channels in each layer by this amount. Defaults to 1.0.
+        out_indices (Sequence[int]): Output from which stages.
+            Defaults to (2, 3, 4).
+        frozen_stages (int): Stages to be frozen (stop grad and set eval
+            mode). -1 means not freezing any parameters. Defaults to -1.
+        plugins (list[dict]): List of plugins for stages, each dict contains:
+
+            - cfg (dict, required): Cfg dict to build plugin.
+            - stages (tuple[bool], optional): Stages to apply plugin, length
+              should be same as 'num_stages'.
+        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
+            config norm layer. Defaults to dict(type='BN', requires_grad=True).
+        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
+            Defaults to dict(type='SiLU', inplace=True).
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only.
+        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
+            list[:obj:`ConfigDict`]): Initialization config dict.
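+    Example (illustrative; the printed shapes assume the default ``L``
+    architecture with ``widen_factor=1.0`` and ``out_indices=(2, 3, 4)``):
+        >>> from mmyolo.models import YOLOv7Backbone
+        >>> import torch
+        >>> model = YOLOv7Backbone(arch='L')
+        >>> model.eval()
+        >>> inputs = torch.rand(1, 3, 416, 416)
+        >>> level_outputs = model(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        ...
+        (1, 512, 52, 52)
+        (1, 1024, 26, 26)
+        (1, 1024, 13, 13)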
+ """ + _tiny_stage1_cfg = dict(type='TinyDownSampleBlock', middle_ratio=0.5) + _tiny_stage2_4_cfg = dict(type='TinyDownSampleBlock', middle_ratio=1.0) + _l_expand_channel_2x = dict( + type='ELANBlock', + middle_ratio=0.5, + block_ratio=0.5, + num_blocks=2, + num_convs_in_block=2) + _l_no_change_channel = dict( + type='ELANBlock', + middle_ratio=0.25, + block_ratio=0.25, + num_blocks=2, + num_convs_in_block=2) + _x_expand_channel_2x = dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.4, + num_blocks=3, + num_convs_in_block=2) + _x_no_change_channel = dict( + type='ELANBlock', + middle_ratio=0.2, + block_ratio=0.2, + num_blocks=3, + num_convs_in_block=2) + _w_no_change_channel = dict( + type='ELANBlock', + middle_ratio=0.5, + block_ratio=0.5, + num_blocks=2, + num_convs_in_block=2) + _e_no_change_channel = dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.4, + num_blocks=3, + num_convs_in_block=2) + _d_no_change_channel = dict( + type='ELANBlock', + middle_ratio=1 / 3, + block_ratio=1 / 3, + num_blocks=4, + num_convs_in_block=2) + _e2e_no_change_channel = dict( + type='EELANBlock', + num_elan_block=2, + middle_ratio=0.4, + block_ratio=0.4, + num_blocks=3, + num_convs_in_block=2) + + # From left to right: + # in_channels, out_channels, Block_params + arch_settings = { + 'Tiny': [[64, 64, _tiny_stage1_cfg], [64, 128, _tiny_stage2_4_cfg], + [128, 256, _tiny_stage2_4_cfg], + [256, 512, _tiny_stage2_4_cfg]], + 'L': [[64, 256, _l_expand_channel_2x], + [256, 512, _l_expand_channel_2x], + [512, 1024, _l_expand_channel_2x], + [1024, 1024, _l_no_change_channel]], + 'X': [[80, 320, _x_expand_channel_2x], + [320, 640, _x_expand_channel_2x], + [640, 1280, _x_expand_channel_2x], + [1280, 1280, _x_no_change_channel]], + 'W': + [[64, 128, _w_no_change_channel], [128, 256, _w_no_change_channel], + [256, 512, _w_no_change_channel], [512, 768, _w_no_change_channel], + [768, 1024, _w_no_change_channel]], + 'E': + [[80, 160, _e_no_change_channel], [160, 320, _e_no_change_channel], + [320, 640, _e_no_change_channel], [640, 960, _e_no_change_channel], + [960, 1280, _e_no_change_channel]], + 'D': [[96, 192, + _d_no_change_channel], [192, 384, _d_no_change_channel], + [384, 768, _d_no_change_channel], + [768, 1152, _d_no_change_channel], + [1152, 1536, _d_no_change_channel]], + 'E2E': [[80, 160, _e2e_no_change_channel], + [160, 320, _e2e_no_change_channel], + [320, 640, _e2e_no_change_channel], + [640, 960, _e2e_no_change_channel], + [960, 1280, _e2e_no_change_channel]], + } + + def __init__(self, + arch: str = 'L', + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + input_channels: int = 3, + out_indices: Tuple[int] = (2, 3, 4), + frozen_stages: int = -1, + plugins: Union[dict, List[dict]] = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + norm_eval: bool = False, + init_cfg: OptMultiConfig = None): + assert arch in self.arch_settings.keys() + self.arch = arch + super().__init__( + self.arch_settings[arch], + deepen_factor, + widen_factor, + input_channels=input_channels, + out_indices=out_indices, + plugins=plugins, + frozen_stages=frozen_stages, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def build_stem_layer(self) -> nn.Module: + """Build a stem layer.""" + if self.arch in ['L', 'X']: + stem = nn.Sequential( + ConvModule( + 3, + int(self.arch_setting[0][0] * self.widen_factor // 2), + 3, + padding=1, + stride=1, + norm_cfg=self.norm_cfg, + 
act_cfg=self.act_cfg), + ConvModule( + int(self.arch_setting[0][0] * self.widen_factor // 2), + int(self.arch_setting[0][0] * self.widen_factor), + 3, + padding=1, + stride=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + int(self.arch_setting[0][0] * self.widen_factor), + int(self.arch_setting[0][0] * self.widen_factor), + 3, + padding=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + elif self.arch == 'Tiny': + stem = nn.Sequential( + ConvModule( + 3, + int(self.arch_setting[0][0] * self.widen_factor // 2), + 3, + padding=1, + stride=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + int(self.arch_setting[0][0] * self.widen_factor // 2), + int(self.arch_setting[0][0] * self.widen_factor), + 3, + padding=1, + stride=2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + elif self.arch in ['W', 'E', 'D', 'E2E']: + stem = Focus( + 3, + int(self.arch_setting[0][0] * self.widen_factor), + kernel_size=3, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return stem + + def build_stage_layer(self, stage_idx: int, setting: list) -> list: + """Build a stage layer. + + Args: + stage_idx (int): The index of a stage layer. + setting (list): The architecture setting of a stage layer. + """ + in_channels, out_channels, stage_block_cfg = setting + in_channels = int(in_channels * self.widen_factor) + out_channels = int(out_channels * self.widen_factor) + + stage_block_cfg = stage_block_cfg.copy() + stage_block_cfg.setdefault('norm_cfg', self.norm_cfg) + stage_block_cfg.setdefault('act_cfg', self.act_cfg) + + stage_block_cfg['in_channels'] = in_channels + stage_block_cfg['out_channels'] = out_channels + + stage = [] + if self.arch in ['W', 'E', 'D', 'E2E']: + stage_block_cfg['in_channels'] = out_channels + elif self.arch in ['L', 'X']: + if stage_idx == 0: + stage_block_cfg['in_channels'] = out_channels // 2 + + downsample_layer = self._build_downsample_layer( + stage_idx, in_channels, out_channels) + stage.append(MODELS.build(stage_block_cfg)) + if downsample_layer is not None: + stage.insert(0, downsample_layer) + return stage + + def _build_downsample_layer(self, stage_idx: int, in_channels: int, + out_channels: int) -> Optional[nn.Module]: + """Build a downsample layer pre stage.""" + if self.arch in ['E', 'D', 'E2E']: + downsample_layer = MaxPoolAndStrideConvBlock( + in_channels, + out_channels, + use_in_channels_of_middle=True, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + elif self.arch == 'W': + downsample_layer = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + elif self.arch == 'Tiny': + if stage_idx != 0: + downsample_layer = nn.MaxPool2d(2, 2) + else: + downsample_layer = None + elif self.arch in ['L', 'X']: + if stage_idx == 0: + downsample_layer = ConvModule( + in_channels, + out_channels // 2, + 3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + downsample_layer = MaxPoolAndStrideConvBlock( + in_channels, + in_channels, + use_in_channels_of_middle=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return downsample_layer diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ef4f6d7d801cb8150ebca645ddb3cbf5d1b9599 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/__init__.py 
@@ -0,0 +1,10 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .data_preprocessor import (PPYOLOEBatchRandomResize,
+                                PPYOLOEDetDataPreprocessor,
+                                YOLOv5DetDataPreprocessor,
+                                YOLOXBatchSyncRandomResize)
+
+__all__ = [
+    'YOLOv5DetDataPreprocessor', 'PPYOLOEDetDataPreprocessor',
+    'PPYOLOEBatchRandomResize', 'YOLOXBatchSyncRandomResize'
+]
diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/data_preprocessor.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/data_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a29b90844323836e0264f827edf27aa20dca2507
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/data_preprocessors/data_preprocessor.py
@@ -0,0 +1,310 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import random
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from mmdet.models import BatchSyncRandomResize
+from mmdet.models.data_preprocessors import DetDataPreprocessor
+from mmengine import MessageHub, is_list_of
+from mmengine.structures import BaseDataElement
+from torch import Tensor
+
+from mmyolo.registry import MODELS
+
+CastData = Union[tuple, dict, BaseDataElement, torch.Tensor, list, bytes, str,
+                 None]
+
+
+@MODELS.register_module()
+class YOLOXBatchSyncRandomResize(BatchSyncRandomResize):
+    """YOLOX batch random resize.
+
+    Args:
+        random_size_range (tuple): The multi-scale random range during
+            multi-scale training.
+        interval (int): The iter interval of change
+            image size. Defaults to 10.
+        size_divisor (int): Image size divisible factor.
+            Defaults to 32.
+    """
+
+    def forward(self, inputs: Tensor,
+                data_samples: dict) -> Tuple[Tensor, dict]:
+        """Resize a batch of images and bboxes to shape ``self._input_size``."""
+        h, w = inputs.shape[-2:]
+        inputs = inputs.float()
+        assert isinstance(data_samples, dict)
+
+        if self._input_size is None:
+            self._input_size = (h, w)
+        scale_y = self._input_size[0] / h
+        scale_x = self._input_size[1] / w
+        if scale_x != 1 or scale_y != 1:
+            inputs = F.interpolate(
+                inputs,
+                size=self._input_size,
+                mode='bilinear',
+                align_corners=False)
+
+            data_samples['bboxes_labels'][:, 2::2] *= scale_x
+            data_samples['bboxes_labels'][:, 3::2] *= scale_y
+
+            if 'keypoints' in data_samples:
+                data_samples['keypoints'][..., 0] *= scale_x
+                data_samples['keypoints'][..., 1] *= scale_y
+
+        message_hub = MessageHub.get_current_instance()
+        if (message_hub.get_info('iter') + 1) % self._interval == 0:
+            self._input_size = self._get_random_size(
+                aspect_ratio=float(w / h), device=inputs.device)
+
+        return inputs, data_samples
+
+
+@MODELS.register_module()
+class YOLOv5DetDataPreprocessor(DetDataPreprocessor):
+    """Rewrite collate_fn to get faster training speed.
+
+    Note: It must be used together with `mmyolo.datasets.utils.yolov5_collate`
+    """
+
+    def __init__(self, *args, non_blocking: Optional[bool] = True, **kwargs):
+        super().__init__(*args, non_blocking=non_blocking, **kwargs)
+
+    def forward(self, data: dict, training: bool = False) -> dict:
+        """Perform normalization, padding and bgr2rgb conversion based on
+        ``DetDataPreprocessor``.
+
+        Args:
+            data (dict): Data sampled from dataloader.
+            training (bool): Whether to enable training time augmentation.
+
+        Returns:
+            dict: Data in the same format as the model input.
+        """
+        if not training:
+            return super().forward(data, training)
+
+        data = self.cast_data(data)
+        inputs, data_samples = data['inputs'], data['data_samples']
+        assert isinstance(data['data_samples'], dict)
+
+        # TODO: Support multi-scale training
+        if self._channel_conversion and inputs.shape[1] == 3:
+            inputs = inputs[:, [2, 1, 0], ...]
+        if self._enable_normalize:
+            inputs = (inputs - self.mean) / self.std
+
+        if self.batch_augments is not None:
+            for batch_aug in self.batch_augments:
+                inputs, data_samples = batch_aug(inputs, data_samples)
+
+        img_metas = [{'batch_input_shape': inputs.shape[2:]}] * len(inputs)
+        data_samples_output = {
+            'bboxes_labels': data_samples['bboxes_labels'],
+            'img_metas': img_metas
+        }
+        if 'masks' in data_samples:
+            data_samples_output['masks'] = data_samples['masks']
+        if 'keypoints' in data_samples:
+            data_samples_output['keypoints'] = data_samples['keypoints']
+            data_samples_output['keypoints_visible'] = data_samples[
+                'keypoints_visible']
+
+        return {'inputs': inputs, 'data_samples': data_samples_output}
+
+
+@MODELS.register_module()
+class PPYOLOEDetDataPreprocessor(DetDataPreprocessor):
+    """Image pre-processor for detection tasks.
+
+    The main difference between PPYOLOEDetDataPreprocessor and
+    DetDataPreprocessor is the normalization order. The official
+    PPYOLOE resizes the image first and then normalizes it.
+    In DetDataPreprocessor, the order is reversed.
+
+    Note: It must be used together with
+    `mmyolo.datasets.utils.yolov5_collate`
+    """
+
+    def forward(self, data: dict, training: bool = False) -> dict:
+        """Perform normalization, padding and bgr2rgb conversion based on
+        ``BaseDataPreprocessor``. This class applies batch_augments first and
+        then normalizes the image, which is different from
+        ``DetDataPreprocessor``.
+
+        Args:
+            data (dict): Data sampled from dataloader.
+            training (bool): Whether to enable training time augmentation.
+
+        Returns:
+            dict: Data in the same format as the model input.
+        """
+        if not training:
+            return super().forward(data, training)
+
+        assert isinstance(data['inputs'], list) and is_list_of(
+            data['inputs'], torch.Tensor), \
+            '"inputs" should be a list of Tensor, but got ' \
+            f'{type(data["inputs"])}. The possible reason for this ' \
+            'is that you are not using it with ' \
+            '"mmyolo.datasets.utils.yolov5_collate". Please refer to ' \
+            '"configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py".'
+
+        data = self.cast_data(data)
+        inputs, data_samples = data['inputs'], data['data_samples']
+        assert isinstance(data['data_samples'], dict)
+
+        # Process data.
+        batch_inputs = []
+        for _input in inputs:
+            # channel transform
+            if self._channel_conversion:
+                _input = _input[[2, 1, 0], ...]
+            # Convert to float after channel conversion to ensure
+            # efficiency
+            _input = _input.float()
+            batch_inputs.append(_input)
+
+        # Batch random resize image.
+        if self.batch_augments is not None:
+            for batch_aug in self.batch_augments:
+                inputs, data_samples = batch_aug(batch_inputs, data_samples)
+
+        if self._enable_normalize:
+            inputs = (inputs - self.mean) / self.std
+
+        img_metas = [{'batch_input_shape': inputs.shape[2:]}] * len(inputs)
+        data_samples = {
+            'bboxes_labels': data_samples['bboxes_labels'],
+            'img_metas': img_metas
+        }
+
+        return {'inputs': inputs, 'data_samples': data_samples}
+
+
+# TODO: No generality. Its input data format is different from
+# mmdet's batch aug, and it must be made compatible in the future.
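+# Note: unlike YOLOXBatchSyncRandomResize above, which resizes an already
+# batched tensor, PPYOLOEBatchRandomResize expects a list of per-image
+# tensors (as produced by `yolov5_collate` with `use_ms_training=True`),
+# resizes each image and rescales its boxes, and only then concatenates the
+# results into a single batched tensor. That is why
+# PPYOLOEDetDataPreprocessor applies batch_augments before normalization.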
+@MODELS.register_module() +class PPYOLOEBatchRandomResize(BatchSyncRandomResize): + """PPYOLOE batch random resize. + + Args: + random_size_range (tuple): The multi-scale random range during + multi-scale training. + interval (int): The iter interval of change + image size. Defaults to 10. + size_divisor (int): Image size divisible factor. + Defaults to 32. + random_interp (bool): Whether to choose interp_mode randomly. + If set to True, the type of `interp_mode` must be list. + If set to False, the type of `interp_mode` must be str. + Defaults to True. + interp_mode (Union[List, str]): The modes available for resizing + are ('nearest', 'bilinear', 'bicubic', 'area'). + keep_ratio (bool): Whether to keep the aspect ratio when resizing + the image. Now we only support keep_ratio=False. + Defaults to False. + """ + + def __init__(self, + random_size_range: Tuple[int, int], + interval: int = 1, + size_divisor: int = 32, + random_interp=True, + interp_mode: Union[List[str], str] = [ + 'nearest', 'bilinear', 'bicubic', 'area' + ], + keep_ratio: bool = False) -> None: + super().__init__(random_size_range, interval, size_divisor) + self.random_interp = random_interp + self.keep_ratio = keep_ratio + # TODO: need to support keep_ratio==True + assert not self.keep_ratio, 'We do not yet support keep_ratio=True' + + if self.random_interp: + assert isinstance(interp_mode, list) and len(interp_mode) > 1,\ + 'While random_interp==True, the type of `interp_mode`' \ + ' must be list and len(interp_mode) must large than 1' + self.interp_mode_list = interp_mode + self.interp_mode = None + else: + assert isinstance(interp_mode, str),\ + 'While random_interp==False, the type of ' \ + '`interp_mode` must be str' + assert interp_mode in ['nearest', 'bilinear', 'bicubic', 'area'] + self.interp_mode_list = None + self.interp_mode = interp_mode + + def forward(self, inputs: list, + data_samples: dict) -> Tuple[Tensor, Tensor]: + """Resize a batch of images and bboxes to shape ``self._input_size``. + + The inputs and data_samples should be list, and + ``PPYOLOEBatchRandomResize`` must be used with + ``PPYOLOEDetDataPreprocessor`` and ``yolov5_collate`` with + ``use_ms_training == True``. + """ + assert isinstance(inputs, list),\ + 'The type of inputs must be list. The possible reason for this ' \ + 'is that you are not using it with `PPYOLOEDetDataPreprocessor` ' \ + 'and `yolov5_collate` with use_ms_training == True.' + + bboxes_labels = data_samples['bboxes_labels'] + + message_hub = MessageHub.get_current_instance() + if (message_hub.get_info('iter') + 1) % self._interval == 0: + # get current input size + self._input_size, interp_mode = self._get_random_size_and_interp() + if self.random_interp: + self.interp_mode = interp_mode + + # TODO: need to support type(inputs)==Tensor + if isinstance(inputs, list): + outputs = [] + for i in range(len(inputs)): + _batch_input = inputs[i] + h, w = _batch_input.shape[-2:] + scale_y = self._input_size[0] / h + scale_x = self._input_size[1] / w + if scale_x != 1. 
or scale_y != 1.: + if self.interp_mode in ('nearest', 'area'): + align_corners = None + else: + align_corners = False + _batch_input = F.interpolate( + _batch_input.unsqueeze(0), + size=self._input_size, + mode=self.interp_mode, + align_corners=align_corners) + + # rescale boxes + indexes = bboxes_labels[:, 0] == i + bboxes_labels[indexes, 2] *= scale_x + bboxes_labels[indexes, 3] *= scale_y + bboxes_labels[indexes, 4] *= scale_x + bboxes_labels[indexes, 5] *= scale_y + + data_samples['bboxes_labels'] = bboxes_labels + else: + _batch_input = _batch_input.unsqueeze(0) + + outputs.append(_batch_input) + + # convert to Tensor + return torch.cat(outputs, dim=0), data_samples + else: + raise NotImplementedError('Not implemented yet!') + + def _get_random_size_and_interp(self) -> Tuple[int, int]: + """Randomly generate a shape in ``_random_size_range`` and a + interp_mode in interp_mode_list.""" + size = random.randint(*self._random_size_range) + input_size = (self._size_divisor * size, self._size_divisor * size) + + if self.random_interp: + interp_ind = random.randint(0, len(self.interp_mode_list) - 1) + interp_mode = self.interp_mode_list[interp_ind] + else: + interp_mode = None + return input_size, interp_mode diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..90587c3fbb280082262d48b031a64ea7c69b3dec --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ppyoloe_head import PPYOLOEHead, PPYOLOEHeadModule +from .rtmdet_head import RTMDetHead, RTMDetSepBNHeadModule +from .rtmdet_ins_head import RTMDetInsSepBNHead, RTMDetInsSepBNHeadModule +from .rtmdet_rotated_head import (RTMDetRotatedHead, + RTMDetRotatedSepBNHeadModule) +from .yolov5_head import YOLOv5Head, YOLOv5HeadModule +from .yolov5_ins_head import YOLOv5InsHead, YOLOv5InsHeadModule +from .yolov6_head import YOLOv6Head, YOLOv6HeadModule +from .yolov7_head import YOLOv7Head, YOLOv7HeadModule, YOLOv7p6HeadModule +from .yolov8_head import YOLOv8Head, YOLOv8HeadModule +from .yolox_head import YOLOXHead, YOLOXHeadModule +from .yolox_pose_head import YOLOXPoseHead, YOLOXPoseHeadModule + +__all__ = [ + 'YOLOv5Head', 'YOLOv6Head', 'YOLOXHead', 'YOLOv5HeadModule', + 'YOLOv6HeadModule', 'YOLOXHeadModule', 'RTMDetHead', + 'RTMDetSepBNHeadModule', 'YOLOv7Head', 'PPYOLOEHead', 'PPYOLOEHeadModule', + 'YOLOv7HeadModule', 'YOLOv7p6HeadModule', 'YOLOv8Head', 'YOLOv8HeadModule', + 'RTMDetRotatedHead', 'RTMDetRotatedSepBNHeadModule', 'RTMDetInsSepBNHead', + 'RTMDetInsSepBNHeadModule', 'YOLOv5InsHead', 'YOLOv5InsHeadModule', + 'YOLOXPoseHead', 'YOLOXPoseHeadModule' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/ppyoloe_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/ppyoloe_head.py new file mode 100644 index 0000000000000000000000000000000000000000..f4689876785c40cbd7449cab8f378c8f6d1c1b89 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/ppyoloe_head.py @@ -0,0 +1,374 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.models.utils import multi_apply +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig, reduce_mean) +from mmengine import MessageHub +from mmengine.model import BaseModule, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS +from ..layers.yolo_bricks import PPYOLOESELayer +from ..utils import gt_instances_preprocess +from .yolov6_head import YOLOv6Head + + +@MODELS.register_module() +class PPYOLOEHeadModule(BaseModule): + """PPYOLOEHead head module used in `PPYOLOE. + + `_. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to (8, 16, 32). + reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}`` + in QFL setting. Defaults to 16. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: Union[int, Sequence], + widen_factor: float = 1.0, + num_base_priors: int = 1, + featmap_strides: Sequence[int] = (8, 16, 32), + reg_max: int = 16, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + + self.num_classes = num_classes + self.featmap_strides = featmap_strides + self.num_levels = len(self.featmap_strides) + self.num_base_priors = num_base_priors + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.reg_max = reg_max + + if isinstance(in_channels, int): + self.in_channels = [int(in_channels * widen_factor) + ] * self.num_levels + else: + self.in_channels = [int(i * widen_factor) for i in in_channels] + + self._init_layers() + + def init_weights(self, prior_prob=0.01): + """Initialize the weight and bias of PPYOLOE head.""" + super().init_weights() + for conv in self.cls_preds: + conv.bias.data.fill_(bias_init_with_prob(prior_prob)) + conv.weight.data.fill_(0.) + + for conv in self.reg_preds: + conv.bias.data.fill_(1.0) + conv.weight.data.fill_(0.) 
+ + def _init_layers(self): + """initialize conv layers in PPYOLOE head.""" + self.cls_preds = nn.ModuleList() + self.reg_preds = nn.ModuleList() + self.cls_stems = nn.ModuleList() + self.reg_stems = nn.ModuleList() + + for in_channel in self.in_channels: + self.cls_stems.append( + PPYOLOESELayer( + in_channel, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) + self.reg_stems.append( + PPYOLOESELayer( + in_channel, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) + + for in_channel in self.in_channels: + self.cls_preds.append( + nn.Conv2d(in_channel, self.num_classes, 3, padding=1)) + self.reg_preds.append( + nn.Conv2d(in_channel, 4 * (self.reg_max + 1), 3, padding=1)) + + # init proj + proj = torch.arange(self.reg_max + 1, dtype=torch.float) + self.register_buffer('proj', proj, persistent=False) + + def forward(self, x: Tuple[Tensor]) -> Tensor: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions. + """ + assert len(x) == self.num_levels + + return multi_apply(self.forward_single, x, self.cls_stems, + self.cls_preds, self.reg_stems, self.reg_preds) + + def forward_single(self, x: Tensor, cls_stem: nn.ModuleList, + cls_pred: nn.ModuleList, reg_stem: nn.ModuleList, + reg_pred: nn.ModuleList) -> Tensor: + """Forward feature of a single scale level.""" + b, _, h, w = x.shape + avg_feat = F.adaptive_avg_pool2d(x, (1, 1)) + cls_logit = cls_pred(cls_stem(x, avg_feat) + x) + bbox_dist_preds = reg_pred(reg_stem(x, avg_feat)) + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max + 1, h * w]).permute(0, 3, 1, 2) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds + else: + return cls_logit, bbox_preds + + +@MODELS.register_module() +class PPYOLOEHead(YOLOv6Head): + """PPYOLOEHead head used in `PPYOLOE `_. + The YOLOv6 head and the PPYOLOE head are only slightly different. + Distribution focal loss is extra used in PPYOLOE, but not in YOLOv6. + + Args: + head_module(ConfigType): Base module used for YOLOv5Head + prior_generator(dict): Points generator feature maps in + 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_dfl (:obj:`ConfigDict` or dict): Config of distribution focal + loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0.5, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + reduction='sum', + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='IoULoss', + iou_mode='giou', + bbox_format='xyxy', + reduction='mean', + loss_weight=2.5, + return_iou=False), + loss_dfl: ConfigType = dict( + type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=0.5 / 4), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.loss_dfl = MODELS.build(loss_dfl) + # ppyoloe doesn't need loss_obj + self.loss_obj = None + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + bbox_dist_preds: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + bbox_dist_preds (Sequence[Tensor]): Box distribution logits for + each scale level with shape (bs, reg_max + 1, H*W, 4). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. 
+ """ + + # get epoch information from message hub + message_hub = MessageHub.get_current_instance() + current_epoch = message_hub.get_info('epoch') + + num_imgs = len(batch_img_metas) + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + # (bs, reg_max+1, n, 4) -> (bs, n, 4, reg_max+1) + flatten_pred_dists = [ + bbox_pred_org.permute(0, 2, 3, 1).reshape( + num_imgs, -1, (self.head_module.reg_max + 1) * 4) + for bbox_pred_org in bbox_dist_preds + ] + + flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1) + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[..., 0]) + pred_scores = torch.sigmoid(flatten_cls_preds) + + if current_epoch < self.initial_epoch: + assigned_result = self.initial_assigner( + flatten_pred_bboxes.detach(), self.flatten_priors_train, + self.num_level_priors, gt_labels, gt_bboxes, pad_bbox_flag) + else: + assigned_result = self.assigner(flatten_pred_bboxes.detach(), + pred_scores.detach(), + self.flatten_priors_train, + gt_labels, gt_bboxes, + pad_bbox_flag) + + assigned_bboxes = assigned_result['assigned_bboxes'] + assigned_scores = assigned_result['assigned_scores'] + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'] + + # cls loss + with torch.cuda.amp.autocast(enabled=False): + loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores) + + # rescale bbox + assigned_bboxes /= self.stride_tensor + flatten_pred_bboxes /= self.stride_tensor + + assigned_scores_sum = assigned_scores.sum() + # reduce_mean between all gpus + assigned_scores_sum = torch.clamp( + reduce_mean(assigned_scores_sum), min=1) + loss_cls /= assigned_scores_sum + + # select positive samples mask + num_pos = fg_mask_pre_prior.sum() + if num_pos > 0: + # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox + # will not report an error + # iou loss + prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4]) + pred_bboxes_pos = torch.masked_select( + flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4]) + assigned_bboxes_pos = torch.masked_select( + assigned_bboxes, prior_bbox_mask).reshape([-1, 4]) + bbox_weight = torch.masked_select( + assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1) + loss_bbox = self.loss_bbox( + pred_bboxes_pos, + assigned_bboxes_pos, + weight=bbox_weight, + avg_factor=assigned_scores_sum) + + # dfl loss + dist_mask = 
fg_mask_pre_prior.unsqueeze(-1).repeat( + [1, 1, (self.head_module.reg_max + 1) * 4]) + + pred_dist_pos = torch.masked_select( + flatten_dist_preds, + dist_mask).reshape([-1, 4, self.head_module.reg_max + 1]) + assigned_ltrb = self.bbox_coder.encode( + self.flatten_priors_train[..., :2] / self.stride_tensor, + assigned_bboxes, + max_dis=self.head_module.reg_max, + eps=0.01) + assigned_ltrb_pos = torch.masked_select( + assigned_ltrb, prior_bbox_mask).reshape([-1, 4]) + loss_dfl = self.loss_dfl( + pred_dist_pos.reshape(-1, self.head_module.reg_max + 1), + assigned_ltrb_pos.reshape(-1), + weight=bbox_weight.expand(-1, 4).reshape(-1), + avg_factor=assigned_scores_sum) + else: + loss_bbox = flatten_pred_bboxes.sum() * 0 + loss_dfl = flatten_pred_bboxes.sum() * 0 + + return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_dfl=loss_dfl) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_head.py new file mode 100644 index 0000000000000000000000000000000000000000..54245a97f404b66eba47e41f03302110c8894134 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_head.py @@ -0,0 +1,368 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, is_norm +from mmdet.models.task_modules.samplers import PseudoSampler +from mmdet.structures.bbox import distance2bbox +from mmdet.utils import (ConfigType, InstanceList, OptConfigType, + OptInstanceList, OptMultiConfig, reduce_mean) +from mmengine.model import (BaseModule, bias_init_with_prob, constant_init, + normal_init) +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from ..utils import gt_instances_preprocess +from .yolov5_head import YOLOv5Head + + +@MODELS.register_module() +class RTMDetSepBNHeadModule(BaseModule): + """Detection Head of RTMDet. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid. Defaults to 1. + feat_channels (int): Number of hidden channels. Used in child classes. + Defaults to 256 + stacked_convs (int): Number of stacking convs of the head. + Defaults to 2. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to (8, 16, 32). + share_conv (bool): Whether to share conv layers between stages. + Defaults to True. + pred_kernel_size (int): Kernel size of ``nn.Conv2d``. Defaults to 1. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to ``dict(type='BN')``. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Default: dict(type='SiLU', inplace=True). + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
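+    Example (illustrative; shapes assume ``num_classes=80``,
+    ``in_channels=256`` and three input feature maps):
+        >>> import torch
+        >>> from mmyolo.models.dense_heads import RTMDetSepBNHeadModule
+        >>> head_module = RTMDetSepBNHeadModule(num_classes=80, in_channels=256)
+        >>> feats = [torch.rand(1, 256, 80 // 2**i, 80 // 2**i) for i in range(3)]
+        >>> cls_scores, bbox_preds = head_module(feats)
+        >>> [tuple(cls_score.shape) for cls_score in cls_scores]
+        [(1, 80, 80, 80), (1, 80, 40, 40), (1, 80, 20, 20)]
+        >>> [tuple(bbox_pred.shape) for bbox_pred in bbox_preds]
+        [(1, 4, 80, 80), (1, 4, 40, 40), (1, 4, 20, 20)]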
+ """ + + def __init__( + self, + num_classes: int, + in_channels: int, + widen_factor: float = 1.0, + num_base_priors: int = 1, + feat_channels: int = 256, + stacked_convs: int = 2, + featmap_strides: Sequence[int] = [8, 16, 32], + share_conv: bool = True, + pred_kernel_size: int = 1, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN'), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + ): + super().__init__(init_cfg=init_cfg) + self.share_conv = share_conv + self.num_classes = num_classes + self.pred_kernel_size = pred_kernel_size + self.feat_channels = int(feat_channels * widen_factor) + self.stacked_convs = stacked_convs + self.num_base_priors = num_base_priors + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.featmap_strides = featmap_strides + + self.in_channels = int(in_channels * widen_factor) + + self._init_layers() + + def _init_layers(self): + """Initialize layers of the head.""" + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + + self.rtm_cls = nn.ModuleList() + self.rtm_reg = nn.ModuleList() + for n in range(len(self.featmap_strides)): + cls_convs = nn.ModuleList() + reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.cls_convs.append(cls_convs) + self.reg_convs.append(reg_convs) + + self.rtm_cls.append( + nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.num_classes, + self.pred_kernel_size, + padding=self.pred_kernel_size // 2)) + self.rtm_reg.append( + nn.Conv2d( + self.feat_channels, + self.num_base_priors * 4, + self.pred_kernel_size, + padding=self.pred_kernel_size // 2)) + + if self.share_conv: + for n in range(len(self.featmap_strides)): + for i in range(self.stacked_convs): + self.cls_convs[n][i].conv = self.cls_convs[0][i].conv + self.reg_convs[n][i].conv = self.reg_convs[0][i].conv + + def init_weights(self) -> None: + """Initialize weights of the head.""" + # Use prior in model initialization to improve stability + super().init_weights() + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, mean=0, std=0.01) + if is_norm(m): + constant_init(m, 1) + bias_cls = bias_init_with_prob(0.01) + for rtm_cls, rtm_reg in zip(self.rtm_cls, self.rtm_reg): + normal_init(rtm_cls, std=0.01, bias=bias_cls) + normal_init(rtm_reg, std=0.01) + + def forward(self, feats: Tuple[Tensor, ...]) -> tuple: + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + - cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. 
+ """ + + cls_scores = [] + bbox_preds = [] + for idx, x in enumerate(feats): + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs[idx]: + cls_feat = cls_layer(cls_feat) + cls_score = self.rtm_cls[idx](cls_feat) + + for reg_layer in self.reg_convs[idx]: + reg_feat = reg_layer(reg_feat) + + reg_dist = self.rtm_reg[idx](reg_feat) + cls_scores.append(cls_score) + bbox_preds.append(reg_dist) + return tuple(cls_scores), tuple(bbox_preds) + + +@MODELS.register_module() +class RTMDetHead(YOLOv5Head): + """RTMDet head. + + Args: + head_module(ConfigType): Base module used for RTMDetHead + prior_generator: Points generator feature maps in + 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.GIoULoss', loss_weight=2.0), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if self.use_sigmoid_cls: + self.cls_out_channels = self.num_classes + else: + self.cls_out_channels = self.num_classes + 1 + # rtmdet doesn't need loss_obj + self.loss_obj = None + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. + """ + if self.train_cfg: + self.assigner = TASK_UTILS.build(self.train_cfg.assigner) + if self.train_cfg.get('sampler', None) is not None: + self.sampler = TASK_UTILS.build( + self.train_cfg.sampler, default_args=dict(context=self)) + else: + self.sampler = PseudoSampler(context=self) + + self.featmap_sizes_train = None + self.flatten_priors_train = None + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, and objectnesses. + """ + return self.head_module(x) + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Compute losses of the head. 
+ + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Decoded box for each scale + level with shape (N, num_anchors * 4, H, W) in + [tl_x, tl_y, br_x, br_y] format. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + num_imgs = len(batch_img_metas) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + device = cls_scores[0].device + + # If the shape does not equal, generate new one + if featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = featmap_sizes + mlvl_priors_with_stride = self.prior_generator.grid_priors( + featmap_sizes, device=device, with_stride=True) + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + + flatten_cls_scores = torch.cat([ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_score in cls_scores + ], 1).contiguous() + + flatten_bboxes = torch.cat([ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ], 1) + flatten_bboxes = flatten_bboxes * self.flatten_priors_train[..., -1, + None] + flatten_bboxes = distance2bbox(self.flatten_priors_train[..., :2], + flatten_bboxes) + + assigned_result = self.assigner(flatten_bboxes.detach(), + flatten_cls_scores.detach(), + self.flatten_priors_train, gt_labels, + gt_bboxes, pad_bbox_flag) + + labels = assigned_result['assigned_labels'].reshape(-1) + label_weights = assigned_result['assigned_labels_weights'].reshape(-1) + bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4) + assign_metrics = assigned_result['assign_metrics'].reshape(-1) + cls_preds = flatten_cls_scores.reshape(-1, self.num_classes) + bbox_preds = flatten_bboxes.reshape(-1, 4) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + avg_factor = reduce_mean(assign_metrics.sum()).clamp_(min=1).item() + + loss_cls = self.loss_cls( + cls_preds, (labels, assign_metrics), + label_weights, + avg_factor=avg_factor) + + if len(pos_inds) > 0: + loss_bbox = self.loss_bbox( + bbox_preds[pos_inds], + bbox_targets[pos_inds], + weight=assign_metrics[pos_inds], + avg_factor=avg_factor) + else: + loss_bbox = bbox_preds.sum() * 0 + + return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_ins_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_ins_head.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0562aad6fb977516924ef9cd72cdef54ff0016 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_ins_head.py @@ -0,0 +1,725 @@ +# Copyright (c) 
OpenMMLab. All rights reserved. +import copy +from typing import List, Optional, Tuple + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, is_norm +from mmcv.ops import batched_nms +from mmdet.models.utils import filter_scores_and_topk +from mmdet.structures.bbox import get_box_tensor, get_box_wh, scale_boxes +from mmdet.utils import (ConfigType, InstanceList, OptConfigType, + OptInstanceList, OptMultiConfig) +from mmengine import ConfigDict +from mmengine.model import (BaseModule, bias_init_with_prob, constant_init, + normal_init) +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS +from .rtmdet_head import RTMDetHead, RTMDetSepBNHeadModule + + +class MaskFeatModule(BaseModule): + """Mask feature head used in RTMDet-Ins. Copy from mmdet. + + Args: + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels of the mask feature + map branch. + stacked_convs (int): Number of convs in mask feature branch. + num_levels (int): The starting feature map level from RPN that + will be used to predict the mask feature map. + num_prototypes (int): Number of output channel of the mask feature + map branch. This is the channel count of the mask + feature map that to be dynamically convolved with the predicted + kernel. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Default: dict(type='ReLU', inplace=True) + norm_cfg (dict): Config dict for normalization layer. Default: None. + """ + + def __init__( + self, + in_channels: int, + feat_channels: int = 256, + stacked_convs: int = 4, + num_levels: int = 3, + num_prototypes: int = 8, + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + norm_cfg: ConfigType = dict(type='BN') + ) -> None: + super().__init__(init_cfg=None) + self.num_levels = num_levels + self.fusion_conv = nn.Conv2d(num_levels * in_channels, in_channels, 1) + convs = [] + for i in range(stacked_convs): + in_c = in_channels if i == 0 else feat_channels + convs.append( + ConvModule( + in_c, + feat_channels, + 3, + padding=1, + act_cfg=act_cfg, + norm_cfg=norm_cfg)) + self.stacked_convs = nn.Sequential(*convs) + self.projection = nn.Conv2d( + feat_channels, num_prototypes, kernel_size=1) + + def forward(self, features: Tuple[Tensor, ...]) -> Tensor: + # multi-level feature fusion + fusion_feats = [features[0]] + size = features[0].shape[-2:] + for i in range(1, self.num_levels): + f = F.interpolate(features[i], size=size, mode='bilinear') + fusion_feats.append(f) + fusion_feats = torch.cat(fusion_feats, dim=1) + fusion_feats = self.fusion_conv(fusion_feats) + # pred mask feats + mask_features = self.stacked_convs(fusion_feats) + mask_features = self.projection(mask_features) + return mask_features + + +@MODELS.register_module() +class RTMDetInsSepBNHeadModule(RTMDetSepBNHeadModule): + """Detection and Instance Segmentation Head of RTMDet. + + Args: + num_classes (int): Number of categories excluding the background + category. + num_prototypes (int): Number of mask prototype features extracted + from the mask head. Defaults to 8. + dyconv_channels (int): Channel of the dynamic conv layers. + Defaults to 8. + num_dyconvs (int): Number of the dynamic convolution layers. + Defaults to 3. + use_sigmoid_cls (bool): Use sigmoid for class prediction. + Defaults to True. 
+ """ + + def __init__(self, + num_classes: int, + *args, + num_prototypes: int = 8, + dyconv_channels: int = 8, + num_dyconvs: int = 3, + use_sigmoid_cls: bool = True, + **kwargs): + self.num_prototypes = num_prototypes + self.num_dyconvs = num_dyconvs + self.dyconv_channels = dyconv_channels + self.use_sigmoid_cls = use_sigmoid_cls + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + super().__init__(num_classes=num_classes, *args, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + self.kernel_convs = nn.ModuleList() + + self.rtm_cls = nn.ModuleList() + self.rtm_reg = nn.ModuleList() + self.rtm_kernel = nn.ModuleList() + self.rtm_obj = nn.ModuleList() + + # calculate num dynamic parameters + weight_nums, bias_nums = [], [] + for i in range(self.num_dyconvs): + if i == 0: + weight_nums.append( + (self.num_prototypes + 2) * self.dyconv_channels) + bias_nums.append(self.dyconv_channels) + elif i == self.num_dyconvs - 1: + weight_nums.append(self.dyconv_channels) + bias_nums.append(1) + else: + weight_nums.append(self.dyconv_channels * self.dyconv_channels) + bias_nums.append(self.dyconv_channels) + self.weight_nums = weight_nums + self.bias_nums = bias_nums + self.num_gen_params = sum(weight_nums) + sum(bias_nums) + pred_pad_size = self.pred_kernel_size // 2 + + for n in range(len(self.featmap_strides)): + cls_convs = nn.ModuleList() + reg_convs = nn.ModuleList() + kernel_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + kernel_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.cls_convs.append(cls_convs) + self.reg_convs.append(cls_convs) + self.kernel_convs.append(kernel_convs) + + self.rtm_cls.append( + nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.cls_out_channels, + self.pred_kernel_size, + padding=pred_pad_size)) + self.rtm_reg.append( + nn.Conv2d( + self.feat_channels, + self.num_base_priors * 4, + self.pred_kernel_size, + padding=pred_pad_size)) + self.rtm_kernel.append( + nn.Conv2d( + self.feat_channels, + self.num_gen_params, + self.pred_kernel_size, + padding=pred_pad_size)) + + if self.share_conv: + for n in range(len(self.featmap_strides)): + for i in range(self.stacked_convs): + self.cls_convs[n][i].conv = self.cls_convs[0][i].conv + self.reg_convs[n][i].conv = self.reg_convs[0][i].conv + + self.mask_head = MaskFeatModule( + in_channels=self.in_channels, + feat_channels=self.feat_channels, + stacked_convs=4, + num_levels=len(self.featmap_strides), + num_prototypes=self.num_prototypes, + act_cfg=self.act_cfg, + norm_cfg=self.norm_cfg) + + def init_weights(self) -> None: + """Initialize weights of the head.""" + for m in self.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, mean=0, std=0.01) + if is_norm(m): + constant_init(m, 1) + bias_cls = bias_init_with_prob(0.01) + for rtm_cls, rtm_reg, rtm_kernel in zip(self.rtm_cls, self.rtm_reg, + self.rtm_kernel): + 
normal_init(rtm_cls, std=0.01, bias=bias_cls) + normal_init(rtm_reg, std=0.01, bias=1) + + def forward(self, feats: Tuple[Tensor, ...]) -> tuple: + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + - cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale + levels, each is a 4D-tensor, the channels number is + num_gen_params. + - mask_feat (Tensor): Mask prototype features. + Has shape (batch_size, num_prototypes, H, W). + """ + mask_feat = self.mask_head(feats) + + cls_scores = [] + bbox_preds = [] + kernel_preds = [] + for idx, (x, stride) in enumerate(zip(feats, self.featmap_strides)): + cls_feat = x + reg_feat = x + kernel_feat = x + + for cls_layer in self.cls_convs[idx]: + cls_feat = cls_layer(cls_feat) + cls_score = self.rtm_cls[idx](cls_feat) + + for kernel_layer in self.kernel_convs[idx]: + kernel_feat = kernel_layer(kernel_feat) + kernel_pred = self.rtm_kernel[idx](kernel_feat) + + for reg_layer in self.reg_convs[idx]: + reg_feat = reg_layer(reg_feat) + reg_dist = self.rtm_reg[idx](reg_feat) + + cls_scores.append(cls_score) + bbox_preds.append(reg_dist) + kernel_preds.append(kernel_pred) + return tuple(cls_scores), tuple(bbox_preds), tuple( + kernel_preds), mask_feat + + +@MODELS.register_module() +class RTMDetInsSepBNHead(RTMDetHead): + """RTMDet Instance Segmentation head. + + Args: + head_module(ConfigType): Base module used for RTMDetInsSepBNHead + prior_generator: Points generator feature maps in + 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_mask (:obj:`ConfigDict` or dict): Config of mask loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.GIoULoss', loss_weight=2.0), + loss_mask=dict( + type='mmdet.DiceLoss', + loss_weight=2.0, + eps=5e-6, + reduction='mean'), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + if isinstance(self.head_module, RTMDetInsSepBNHeadModule): + assert self.use_sigmoid_cls == self.head_module.use_sigmoid_cls + self.loss_mask = MODELS.build(loss_mask) + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + kernel_preds: List[Tensor], + mask_feats: Tensor, + score_factors: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted from the head into + bbox results. + + Note: When score_factors is not None, the cls_scores are + usually multiplied by it then obtain the real score used in NMS. + + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + kernel_preds (list[Tensor]): Kernel predictions of dynamic + convs for all scale levels, each is a 4D-tensor, has shape + (batch_size, num_params, H, W). + mask_feats (Tensor): Mask prototype features extracted from the + mask head, has shape (batch_size, num_prototypes, H, W). + score_factors (list[Tensor], optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, num_priors * 1, H, W). Defaults to None. + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: Object detection and instance + segmentation results of each image after the post process. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + - masks (Tensor): Has a shape (num_instances, h, w). 
+ """ + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + multi_label = cfg.multi_label + multi_label &= self.num_classes > 1 + cfg.multi_label = multi_label + + num_imgs = len(batch_img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size.numel() * self.num_base_priors, ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_kernel_preds = [ + kernel_pred.permute(0, 2, 3, + 1).reshape(num_imgs, -1, + self.head_module.num_gen_params) + for kernel_pred in kernel_preds + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_decoded_bboxes = self.bbox_coder.decode( + flatten_priors[..., :2].unsqueeze(0), flatten_bbox_preds, + flatten_stride) + + flatten_kernel_preds = torch.cat(flatten_kernel_preds, dim=1) + + results_list = [] + for (bboxes, scores, kernel_pred, mask_feat, + img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores, + flatten_kernel_preds, mask_feats, + batch_img_metas): + ori_shape = img_meta['ori_shape'] + scale_factor = img_meta['scale_factor'] + if 'pad_param' in img_meta: + pad_param = img_meta['pad_param'] + else: + pad_param = None + + score_thr = cfg.get('score_thr', -1) + if scores.shape[0] == 0: + empty_results = InstanceData() + empty_results.bboxes = bboxes + empty_results.scores = scores[:, 0] + empty_results.labels = scores[:, 0].int() + h, w = ori_shape[:2] if rescale else img_meta['img_shape'][:2] + empty_results.masks = torch.zeros( + size=(0, h, w), dtype=torch.bool, device=bboxes.device) + results_list.append(empty_results) + continue + + nms_pre = cfg.get('nms_pre', 100000) + if cfg.multi_label is False: + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs, results = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict( + labels=labels[:, 0], + kernel_pred=kernel_pred, + priors=flatten_priors)) + labels = results['labels'] + kernel_pred = results['kernel_pred'] + priors = results['priors'] + else: + out = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict( + kernel_pred=kernel_pred, priors=flatten_priors)) + scores, labels, keep_idxs, filtered_results = out + kernel_pred = filtered_results['kernel_pred'] + priors = filtered_results['priors'] + + results = InstanceData( + scores=scores, + labels=labels, + bboxes=bboxes[keep_idxs], + kernels=kernel_pred, + priors=priors) + + if rescale: + if pad_param is not None: + results.bboxes -= results.bboxes.new_tensor([ + pad_param[2], pad_param[0], pad_param[2], pad_param[0] + ]) + results.bboxes /= results.bboxes.new_tensor( + scale_factor).repeat((1, 2)) + + if cfg.get('yolox_style', False): + # do not need max_per_img + cfg.max_per_img = len(results) + + 
results = self._bbox_mask_post_process( + results=results, + mask_feat=mask_feat, + cfg=cfg, + rescale_bbox=False, + rescale_mask=rescale, + with_nms=with_nms, + pad_param=pad_param, + img_meta=img_meta) + results.bboxes[:, 0::2].clamp_(0, ori_shape[1]) + results.bboxes[:, 1::2].clamp_(0, ori_shape[0]) + + results_list.append(results) + return results_list + + def _bbox_mask_post_process( + self, + results: InstanceData, + mask_feat: Tensor, + cfg: ConfigDict, + rescale_bbox: bool = False, + rescale_mask: bool = True, + with_nms: bool = True, + pad_param: Optional[np.ndarray] = None, + img_meta: Optional[dict] = None) -> InstanceData: + """bbox and mask post-processing method. + + The boxes would be rescaled to the original image scale and do + the nms operation. Usually `with_nms` is False is used for aug test. + + Args: + results (:obj:`InstaceData`): Detection instance results, + each item has shape (num_bboxes, ). + mask_feat (Tensor): Mask prototype features extracted from the + mask head, has shape (batch_size, num_prototypes, H, W). + cfg (ConfigDict): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale_bbox (bool): If True, return boxes in original image space. + Default to False. + rescale_mask (bool): If True, return masks in original image space. + Default to True. + with_nms (bool): If True, do nms before return boxes. + Default to True. + img_meta (dict, optional): Image meta info. Defaults to None. + + Returns: + :obj:`InstanceData`: Detection results of each image + after the post process. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + - masks (Tensor): Has a shape (num_instances, h, w). + """ + if rescale_bbox: + assert img_meta.get('scale_factor') is not None + scale_factor = [1 / s for s in img_meta['scale_factor']] + results.bboxes = scale_boxes(results.bboxes, scale_factor) + + if hasattr(results, 'score_factors'): + # TODO: Add sqrt operation in order to be consistent with + # the paper. 
+ score_factors = results.pop('score_factors') + results.scores = results.scores * score_factors + + # filter small size bboxes + if cfg.get('min_bbox_size', -1) >= 0: + w, h = get_box_wh(results.bboxes) + valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) + if not valid_mask.all(): + results = results[valid_mask] + + # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg + assert with_nms, 'with_nms must be True for RTMDet-Ins' + if results.bboxes.numel() > 0: + bboxes = get_box_tensor(results.bboxes) + det_bboxes, keep_idxs = batched_nms(bboxes, results.scores, + results.labels, cfg.nms) + results = results[keep_idxs] + # some nms would reweight the score, such as softnms + results.scores = det_bboxes[:, -1] + results = results[:cfg.max_per_img] + + # process masks + mask_logits = self._mask_predict_by_feat(mask_feat, + results.kernels, + results.priors) + + stride = self.prior_generator.strides[0][0] + mask_logits = F.interpolate( + mask_logits.unsqueeze(0), scale_factor=stride, mode='bilinear') + if rescale_mask: + # TODO: When use mmdet.Resize or mmdet.Pad, will meet bug + # Use img_meta to crop and resize + ori_h, ori_w = img_meta['ori_shape'][:2] + if isinstance(pad_param, np.ndarray): + pad_param = pad_param.astype(np.int32) + crop_y1, crop_y2 = pad_param[ + 0], mask_logits.shape[-2] - pad_param[1] + crop_x1, crop_x2 = pad_param[ + 2], mask_logits.shape[-1] - pad_param[3] + mask_logits = mask_logits[..., crop_y1:crop_y2, + crop_x1:crop_x2] + mask_logits = F.interpolate( + mask_logits, + size=[ori_h, ori_w], + mode='bilinear', + align_corners=False) + + masks = mask_logits.sigmoid().squeeze(0) + masks = masks > cfg.mask_thr_binary + results.masks = masks + else: + h, w = img_meta['ori_shape'][:2] if rescale_mask else img_meta[ + 'img_shape'][:2] + results.masks = torch.zeros( + size=(results.bboxes.shape[0], h, w), + dtype=torch.bool, + device=results.bboxes.device) + return results + + def _mask_predict_by_feat(self, mask_feat: Tensor, kernels: Tensor, + priors: Tensor) -> Tensor: + """Generate mask logits from mask features with dynamic convs. + + Args: + mask_feat (Tensor): Mask prototype features. + Has shape (num_prototypes, H, W). + kernels (Tensor): Kernel parameters for each instance. + Has shape (num_instance, num_params) + priors (Tensor): Center priors for each instance. + Has shape (num_instance, 4). + Returns: + Tensor: Instance segmentation masks for each instance. + Has shape (num_instance, H, W). 
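+
+        Note:
+            Before the dynamic convolutions are applied, two channels of
+            prior-relative coordinates (normalised by ``stride * 8``) are
+            concatenated to the prototype features, so the per-instance input
+            to the first dynamic layer has ``num_prototypes + 2`` channels.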
+ """ + num_inst = kernels.shape[0] + h, w = mask_feat.size()[-2:] + if num_inst < 1: + return torch.empty( + size=(num_inst, h, w), + dtype=mask_feat.dtype, + device=mask_feat.device) + if len(mask_feat.shape) < 4: + mask_feat.unsqueeze(0) + + coord = self.prior_generator.single_level_grid_priors( + (h, w), level_idx=0, device=mask_feat.device).reshape(1, -1, 2) + num_inst = priors.shape[0] + points = priors[:, :2].reshape(-1, 1, 2) + strides = priors[:, 2:].reshape(-1, 1, 2) + relative_coord = (points - coord).permute(0, 2, 1) / ( + strides[..., 0].reshape(-1, 1, 1) * 8) + relative_coord = relative_coord.reshape(num_inst, 2, h, w) + + mask_feat = torch.cat( + [relative_coord, + mask_feat.repeat(num_inst, 1, 1, 1)], dim=1) + weights, biases = self.parse_dynamic_params(kernels) + + n_layers = len(weights) + x = mask_feat.reshape(1, -1, h, w) + for i, (weight, bias) in enumerate(zip(weights, biases)): + x = F.conv2d( + x, weight, bias=bias, stride=1, padding=0, groups=num_inst) + if i < n_layers - 1: + x = F.relu(x) + x = x.reshape(num_inst, h, w) + return x + + def parse_dynamic_params(self, flatten_kernels: Tensor) -> tuple: + """split kernel head prediction to conv weight and bias.""" + n_inst = flatten_kernels.size(0) + n_layers = len(self.head_module.weight_nums) + params_splits = list( + torch.split_with_sizes( + flatten_kernels, + self.head_module.weight_nums + self.head_module.bias_nums, + dim=1)) + weight_splits = params_splits[:n_layers] + bias_splits = params_splits[n_layers:] + for i in range(n_layers): + if i < n_layers - 1: + weight_splits[i] = weight_splits[i].reshape( + n_inst * self.head_module.dyconv_channels, -1, 1, 1) + bias_splits[i] = bias_splits[i].reshape( + n_inst * self.head_module.dyconv_channels) + else: + weight_splits[i] = weight_splits[i].reshape(n_inst, -1, 1, 1) + bias_splits[i] = bias_splits[i].reshape(n_inst) + + return weight_splits, bias_splits + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + raise NotImplementedError diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_rotated_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_rotated_head.py new file mode 100644 index 0000000000000000000000000000000000000000..1428b4fd05065e3dba764313febc46d6125408ac --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/rtmdet_rotated_head.py @@ -0,0 +1,641 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import warnings +from typing import List, Optional, Sequence, Tuple + +import torch +import torch.nn as nn +from mmdet.models.utils import filter_scores_and_topk +from mmdet.structures.bbox import HorizontalBoxes, distance2bbox +from mmdet.structures.bbox.transforms import bbox_cxcywh_to_xyxy, scale_boxes +from mmdet.utils import (ConfigType, InstanceList, OptConfigType, + OptInstanceList, OptMultiConfig, reduce_mean) +from mmengine.config import ConfigDict +from mmengine.model import normal_init +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from ..utils import gt_instances_preprocess +from .rtmdet_head import RTMDetHead, RTMDetSepBNHeadModule + +try: + from mmrotate.structures.bbox import RotatedBoxes, distance2obb + MMROTATE_AVAILABLE = True +except ImportError: + RotatedBoxes = None + distance2obb = None + MMROTATE_AVAILABLE = False + + +@MODELS.register_module() +class RTMDetRotatedSepBNHeadModule(RTMDetSepBNHeadModule): + """Detection Head Module of RTMDet-R. + + Compared with RTMDet Detection Head Module, RTMDet-R adds + a conv for angle prediction. + An `angle_out_dim` arg is added, which is generated by the + angle_coder module and controls the angle pred dim. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid. Defaults to 1. + feat_channels (int): Number of hidden channels. Used in child classes. + Defaults to 256 + stacked_convs (int): Number of stacking convs of the head. + Defaults to 2. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to (8, 16, 32). + share_conv (bool): Whether to share conv layers between stages. + Defaults to True. + pred_kernel_size (int): Kernel size of ``nn.Conv2d``. Defaults to 1. + angle_out_dim (int): Encoded length of angle, will passed by head. + Defaults to 1. + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to ``dict(type='BN')``. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Default: dict(type='SiLU', inplace=True). + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__( + self, + num_classes: int, + in_channels: int, + widen_factor: float = 1.0, + num_base_priors: int = 1, + feat_channels: int = 256, + stacked_convs: int = 2, + featmap_strides: Sequence[int] = [8, 16, 32], + share_conv: bool = True, + pred_kernel_size: int = 1, + angle_out_dim: int = 1, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN'), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + ): + self.angle_out_dim = angle_out_dim + super().__init__( + num_classes=num_classes, + in_channels=in_channels, + widen_factor=widen_factor, + num_base_priors=num_base_priors, + feat_channels=feat_channels, + stacked_convs=stacked_convs, + featmap_strides=featmap_strides, + share_conv=share_conv, + pred_kernel_size=pred_kernel_size, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def _init_layers(self): + """Initialize layers of the head.""" + super()._init_layers() + self.rtm_ang = nn.ModuleList() + for _ in range(len(self.featmap_strides)): + self.rtm_ang.append( + nn.Conv2d( + self.feat_channels, + self.num_base_priors * self.angle_out_dim, + self.pred_kernel_size, + padding=self.pred_kernel_size // 2)) + + def init_weights(self) -> None: + """Initialize weights of the head.""" + # Use prior in model initialization to improve stability + super().init_weights() + for rtm_ang in self.rtm_ang: + normal_init(rtm_ang, std=0.01) + + def forward(self, feats: Tuple[Tensor, ...]) -> tuple: + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually a tuple of classification scores and bbox prediction + - cls_scores (list[Tensor]): Classification scores for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * 4. + - angle_preds (list[Tensor]): Angle prediction for all scale + levels, each is a 4D-tensor, the channels number is + num_base_priors * angle_out_dim. + """ + + cls_scores = [] + bbox_preds = [] + angle_preds = [] + for idx, x in enumerate(feats): + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs[idx]: + cls_feat = cls_layer(cls_feat) + cls_score = self.rtm_cls[idx](cls_feat) + + for reg_layer in self.reg_convs[idx]: + reg_feat = reg_layer(reg_feat) + + reg_dist = self.rtm_reg[idx](reg_feat) + angle_pred = self.rtm_ang[idx](reg_feat) + + cls_scores.append(cls_score) + bbox_preds.append(reg_dist) + angle_preds.append(angle_pred) + return tuple(cls_scores), tuple(bbox_preds), tuple(angle_preds) + + +@MODELS.register_module() +class RTMDetRotatedHead(RTMDetHead): + """RTMDet-R head. + + Compared with RTMDetHead, RTMDetRotatedHead add some args to support + rotated object detection. + + - `angle_version` used to limit angle_range during training. + - `angle_coder` used to encode and decode angle, which is similar + to bbox_coder. + - `use_hbbox_loss` and `loss_angle` allow custom regression loss + calculation for rotated box. + + There are three combination options for regression: + + 1. `use_hbbox_loss=False` and loss_angle is None. + + .. code:: text + + bbox_pred────(tblr)───┐ + ▼ + angle_pred decode──►rbox_pred──(xywha)─►loss_bbox + │ ▲ + └────►decode──(a)─┘ + + 2. `use_hbbox_loss=False` and loss_angle is specified. + A angle loss is added on angle_pred. + + .. 
code:: text + + bbox_pred────(tblr)───┐ + ▼ + angle_pred decode──►rbox_pred──(xywha)─►loss_bbox + │ ▲ + ├────►decode──(a)─┘ + │ + └───────────────────────────────────────────►loss_angle + + 3. `use_hbbox_loss=True` and loss_angle is specified. + In this case the loss_angle must be set. + + .. code:: text + + bbox_pred──(tblr)──►decode──►hbox_pred──(xyxy)──►loss_bbox + + angle_pred──────────────────────────────────────►loss_angle + + - There's a `decoded_with_angle` flag in test_cfg, which is similar + to training process. + + When `decoded_with_angle=True`: + + .. code:: text + + bbox_pred────(tblr)───┐ + ▼ + angle_pred decode──(xywha)──►rbox_pred + │ ▲ + └────►decode──(a)─┘ + + When `decoded_with_angle=False`: + + .. code:: text + + bbox_pred──(tblr)─►decode + │ (xyxy) + ▼ + format───(xywh)──►concat──(xywha)──►rbox_pred + ▲ + angle_pred────────►decode────(a)───────┘ + + Args: + head_module(ConfigType): Base module used for RTMDetRotatedHead. + prior_generator: Points generator feature maps in + 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + angle_version (str): Angle representations. Defaults to 'le90'. + use_hbbox_loss (bool): If true, use horizontal bbox loss and + loss_angle should not be None. Default to False. + angle_coder (:obj:`ConfigDict` or dict): Config of angle coder. + loss_angle (:obj:`ConfigDict` or dict, optional): Config of angle loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__( + self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', strides=[8, 16, 32], + offset=0), + bbox_coder: ConfigType = dict(type='DistanceAnglePointCoder'), + loss_cls: ConfigType = dict( + type='mmdet.QualityFocalLoss', + use_sigmoid=True, + beta=2.0, + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmrotate.RotatedIoULoss', mode='linear', + loss_weight=2.0), + angle_version: str = 'le90', + use_hbbox_loss: bool = False, + angle_coder: ConfigType = dict(type='mmrotate.PseudoAngleCoder'), + loss_angle: OptConfigType = None, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + if not MMROTATE_AVAILABLE: + raise ImportError( + 'Please run "mim install -r requirements/mmrotate.txt" ' + 'to install mmrotate first for rotated detection.') + + self.angle_version = angle_version + self.use_hbbox_loss = use_hbbox_loss + if self.use_hbbox_loss: + assert loss_angle is not None, \ + ('When use hbbox loss, loss_angle needs to be specified') + self.angle_coder = TASK_UTILS.build(angle_coder) + self.angle_out_dim = self.angle_coder.encode_size + if head_module.get('angle_out_dim') is not None: + warnings.warn('angle_out_dim will be overridden by angle_coder ' + 'and does not need to be set manually') + + head_module['angle_out_dim'] = self.angle_out_dim + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + if loss_angle is not None: + self.loss_angle = MODELS.build(loss_angle) + else: + self.loss_angle = None + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + angle_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted by the head into bbox + results. + + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + angle_preds (list[Tensor]): Box angle for each scale level + with shape (N, num_points * angle_dim, H, W) + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: Object detection results of each image + after the post process. Each item usually contains following keys. + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 5), + the last dimension 4 arrange as (x, y, w, h, angle). 
+ """ + assert len(cls_scores) == len(bbox_preds) + if objectnesses is None: + with_objectnesses = False + else: + with_objectnesses = True + assert len(cls_scores) == len(objectnesses) + + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + multi_label = cfg.multi_label + multi_label &= self.num_classes > 1 + cfg.multi_label = multi_label + + # Whether to decode rbox with angle. + # different setting lead to different final results. + # Defaults to True. + decode_with_angle = cfg.get('decode_with_angle', True) + + num_imgs = len(batch_img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size.numel() * self.num_base_priors, ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_angle_preds = [ + angle_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.angle_out_dim) + for angle_pred in angle_preds + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_angle_preds = torch.cat(flatten_angle_preds, dim=1) + flatten_angle_preds = self.angle_coder.decode( + flatten_angle_preds, keepdim=True) + + if decode_with_angle: + flatten_rbbox_preds = torch.cat( + [flatten_bbox_preds, flatten_angle_preds], dim=-1) + flatten_decoded_bboxes = self.bbox_coder.decode( + flatten_priors[None], flatten_rbbox_preds, flatten_stride) + else: + flatten_decoded_hbboxes = self.bbox_coder.decode( + flatten_priors[None], flatten_bbox_preds, flatten_stride) + flatten_decoded_hbboxes = HorizontalBoxes.xyxy_to_cxcywh( + flatten_decoded_hbboxes) + flatten_decoded_bboxes = torch.cat( + [flatten_decoded_hbboxes, flatten_angle_preds], dim=-1) + + if with_objectnesses: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + else: + flatten_objectness = [None for _ in range(num_imgs)] + + results_list = [] + for (bboxes, scores, objectness, + img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores, + flatten_objectness, batch_img_metas): + scale_factor = img_meta['scale_factor'] + if 'pad_param' in img_meta: + pad_param = img_meta['pad_param'] + else: + pad_param = None + + score_thr = cfg.get('score_thr', -1) + # yolox_style does not require the following operations + if objectness is not None and score_thr > 0 and not cfg.get( + 'yolox_style', False): + conf_inds = objectness > score_thr + bboxes = bboxes[conf_inds, :] + scores = scores[conf_inds, :] + objectness = objectness[conf_inds] + + if objectness is not None: + # conf = obj_conf * cls_conf + scores *= objectness[:, None] + + if scores.shape[0] == 0: + empty_results = InstanceData() + empty_results.bboxes = RotatedBoxes(bboxes) + 
empty_results.scores = scores[:, 0] + empty_results.labels = scores[:, 0].int() + results_list.append(empty_results) + continue + + nms_pre = cfg.get('nms_pre', 100000) + if cfg.multi_label is False: + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs, results = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict(labels=labels[:, 0])) + labels = results['labels'] + else: + scores, labels, keep_idxs, _ = filter_scores_and_topk( + scores, score_thr, nms_pre) + + results = InstanceData( + scores=scores, + labels=labels, + bboxes=RotatedBoxes(bboxes[keep_idxs])) + + if rescale: + if pad_param is not None: + results.bboxes.translate_([-pad_param[2], -pad_param[0]]) + + scale_factor = [1 / s for s in img_meta['scale_factor']] + results.bboxes = scale_boxes(results.bboxes, scale_factor) + + if cfg.get('yolox_style', False): + # do not need max_per_img + cfg.max_per_img = len(results) + + results = self._bbox_post_process( + results=results, + cfg=cfg, + rescale=False, + with_nms=with_nms, + img_meta=img_meta) + + results_list.append(results) + return results_list + + def loss_by_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + angle_preds: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Decoded box for each scale + level with shape (N, num_anchors * 4, H, W) in + [tl_x, tl_y, br_x, br_y] format. + angle_preds (list[Tensor]): Angle prediction for each scale + level with shape (N, num_anchors * angle_out_dim, H, W). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + num_imgs = len(batch_img_metas) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xywha + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + device = cls_scores[0].device + + # If the shape does not equal, generate new one + if featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = featmap_sizes + mlvl_priors_with_stride = self.prior_generator.grid_priors( + featmap_sizes, device=device, with_stride=True) + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + + flatten_cls_scores = torch.cat([ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_score in cls_scores + ], 1).contiguous() + + flatten_tblrs = torch.cat([ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ], 1) + flatten_tblrs = flatten_tblrs * self.flatten_priors_train[..., -1, + None] + flatten_angles = torch.cat([ + angle_pred.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.angle_out_dim) for angle_pred in angle_preds + ], 1) + flatten_decoded_angle = self.angle_coder.decode( + flatten_angles, keepdim=True) + flatten_tblra = torch.cat([flatten_tblrs, flatten_decoded_angle], + dim=-1) + flatten_rbboxes = distance2obb( + self.flatten_priors_train[..., :2], + flatten_tblra, + angle_version=self.angle_version) + if self.use_hbbox_loss: + flatten_hbboxes = distance2bbox(self.flatten_priors_train[..., :2], + flatten_tblrs) + + assigned_result = self.assigner(flatten_rbboxes.detach(), + flatten_cls_scores.detach(), + self.flatten_priors_train, gt_labels, + gt_bboxes, pad_bbox_flag) + + labels = assigned_result['assigned_labels'].reshape(-1) + label_weights = assigned_result['assigned_labels_weights'].reshape(-1) + bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 5) + assign_metrics = assigned_result['assign_metrics'].reshape(-1) + cls_preds = flatten_cls_scores.reshape(-1, self.num_classes) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + avg_factor = reduce_mean(assign_metrics.sum()).clamp_(min=1).item() + + loss_cls = self.loss_cls( + cls_preds, (labels, assign_metrics), + label_weights, + avg_factor=avg_factor) + + pos_bbox_targets = bbox_targets[pos_inds] + + if self.use_hbbox_loss: + bbox_preds = flatten_hbboxes.reshape(-1, 4) + pos_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets[:, :4]) + else: + bbox_preds = flatten_rbboxes.reshape(-1, 5) + angle_preds = flatten_angles.reshape(-1, self.angle_out_dim) + + if len(pos_inds) > 0: + loss_bbox = self.loss_bbox( + bbox_preds[pos_inds], + pos_bbox_targets, + weight=assign_metrics[pos_inds], + avg_factor=avg_factor) + loss_angle = angle_preds.sum() * 0 + if self.loss_angle is not None: + pos_angle_targets = bbox_targets[pos_inds][:, 4:5] + pos_angle_targets = self.angle_coder.encode(pos_angle_targets) + loss_angle = self.loss_angle( + angle_preds[pos_inds], + pos_angle_targets, + weight=assign_metrics[pos_inds], + avg_factor=avg_factor) + else: + loss_bbox = bbox_preds.sum() * 0 + loss_angle = angle_preds.sum() * 0 + + losses = dict() + losses['loss_cls'] = loss_cls + losses['loss_bbox'] = loss_bbox + if self.loss_angle is not None: + losses['loss_angle'] = loss_angle + + return losses diff --git 
a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_head.py new file mode 100644 index 0000000000000000000000000000000000000000..fb24617fc17c2861ea150b0fb9ceb3d8a145bb9d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_head.py @@ -0,0 +1,895 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmdet.models.dense_heads.base_dense_head import BaseDenseHead +from mmdet.models.utils import filter_scores_and_topk, multi_apply +from mmdet.structures.bbox import bbox_overlaps +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig) +from mmengine.config import ConfigDict +from mmengine.dist import get_dist_info +from mmengine.logging import print_log +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from ..utils import make_divisible + + +def get_prior_xy_info(index: int, num_base_priors: int, + featmap_sizes: int) -> Tuple[int, int, int]: + """Get prior index and xy index in feature map by flatten index.""" + _, featmap_w = featmap_sizes + priors = index % num_base_priors + xy_index = index // num_base_priors + grid_y = xy_index // featmap_w + grid_x = xy_index % featmap_w + return priors, grid_x, grid_y + + +@MODELS.register_module() +class YOLOv5HeadModule(BaseModule): + """YOLOv5Head head module used in `YOLOv5`. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (Union[int, Sequence]): Number of channels in the input + feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to (8, 16, 32). + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
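+
+    Note:
+        Each prior predicts ``5 + num_classes`` values (4 box offsets, one
+        objectness score and the per-class scores), so every prediction conv
+        outputs ``num_base_priors * (5 + num_classes)`` channels, e.g.
+        3 * (5 + 80) = 255 channels for the 80 COCO classes.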
+ """ + + def __init__(self, + num_classes: int, + in_channels: Union[int, Sequence], + widen_factor: float = 1.0, + num_base_priors: int = 3, + featmap_strides: Sequence[int] = (8, 16, 32), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.widen_factor = widen_factor + + self.featmap_strides = featmap_strides + self.num_out_attrib = 5 + self.num_classes + self.num_levels = len(self.featmap_strides) + self.num_base_priors = num_base_priors + + if isinstance(in_channels, int): + self.in_channels = [make_divisible(in_channels, widen_factor) + ] * self.num_levels + else: + self.in_channels = [ + make_divisible(i, widen_factor) for i in in_channels + ] + + self._init_layers() + + def _init_layers(self): + """initialize conv layers in YOLOv5 head.""" + self.convs_pred = nn.ModuleList() + for i in range(self.num_levels): + conv_pred = nn.Conv2d(self.in_channels[i], + self.num_base_priors * self.num_out_attrib, + 1) + + self.convs_pred.append(conv_pred) + + def init_weights(self): + """Initialize the bias of YOLOv5 head.""" + super().init_weights() + for mi, s in zip(self.convs_pred, self.featmap_strides): # from + b = mi.bias.data.view(self.num_base_priors, -1) + # obj (8 objects per 640 image) + b.data[:, 4] += math.log(8 / (640 / s)**2) + # NOTE: The following initialization can only be performed on the + # bias of the category, if the following initialization is + # performed on the bias of mask coefficient, + # there will be a significant decrease in mask AP. + b.data[:, 5:5 + self.num_classes] += math.log( + 0.6 / (self.num_classes - 0.999999)) + + mi.bias.data = b.view(-1) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, and objectnesses. + """ + assert len(x) == self.num_levels + return multi_apply(self.forward_single, x, self.convs_pred) + + def forward_single(self, x: Tensor, + convs: nn.Module) -> Tuple[Tensor, Tensor, Tensor]: + """Forward feature of a single scale level.""" + + pred_map = convs(x) + bs, _, ny, nx = pred_map.shape + pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib, + ny, nx) + + cls_score = pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx) + bbox_pred = pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx) + objectness = pred_map[:, :, 4:5, ...].reshape(bs, -1, ny, nx) + + return cls_score, bbox_pred, objectness + + +@MODELS.register_module() +class YOLOv5Head(BaseDenseHead): + """YOLOv5Head head used in `YOLOv5`. + + Args: + head_module(ConfigType): Base module used for YOLOv5Head + prior_generator(dict): Points generator feature maps in + 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss. + prior_match_thr (float): Defaults to 4.0. + ignore_iof_thr (float): Defaults to -1.0. + obj_level_weights (List[float]): Defaults to [4.0, 1.0, 0.4]. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. 
+ init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.YOLOAnchorGenerator', + base_sizes=[[(10, 13), (16, 30), (33, 23)], + [(30, 61), (62, 45), (59, 119)], + [(116, 90), (156, 198), (373, 326)]], + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='YOLOv5BBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=0.5), + loss_bbox: ConfigType = dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xywh', + eps=1e-7, + reduction='mean', + loss_weight=0.05, + return_iou=True), + loss_obj: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + prior_match_thr: float = 4.0, + near_neighbor_thr: float = 0.5, + ignore_iof_thr: float = -1.0, + obj_level_weights: List[float] = [4.0, 1.0, 0.4], + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + + self.head_module = MODELS.build(head_module) + self.num_classes = self.head_module.num_classes + self.featmap_strides = self.head_module.featmap_strides + self.num_levels = len(self.featmap_strides) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.loss_cls: nn.Module = MODELS.build(loss_cls) + self.loss_bbox: nn.Module = MODELS.build(loss_bbox) + self.loss_obj: nn.Module = MODELS.build(loss_obj) + + self.prior_generator = TASK_UTILS.build(prior_generator) + self.bbox_coder = TASK_UTILS.build(bbox_coder) + self.num_base_priors = self.prior_generator.num_base_priors[0] + + self.featmap_sizes = [torch.empty(1)] * self.num_levels + + self.prior_match_thr = prior_match_thr + self.near_neighbor_thr = near_neighbor_thr + self.obj_level_weights = obj_level_weights + self.ignore_iof_thr = ignore_iof_thr + + self.special_init() + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. + """ + assert len(self.obj_level_weights) == len( + self.featmap_strides) == self.num_levels + if self.prior_match_thr != 4.0: + print_log( + "!!!Now, you've changed the prior_match_thr " + 'parameter to something other than 4.0. Please make sure ' + 'that you have modified both the regression formula in ' + 'bbox_coder and before loss_box computation, ' + 'otherwise the accuracy may be degraded!!!') + + if self.num_classes == 1: + print_log('!!!You are using `YOLOv5Head` with num_classes == 1.' + ' The loss_cls will be 0. 
This is a normal phenomenon.') + + priors_base_sizes = torch.tensor( + self.prior_generator.base_sizes, dtype=torch.float) + featmap_strides = torch.tensor( + self.featmap_strides, dtype=torch.float)[:, None, None] + self.register_buffer( + 'priors_base_sizes', + priors_base_sizes / featmap_strides, + persistent=False) + + grid_offset = torch.tensor([ + [0, 0], # center + [1, 0], # left + [0, 1], # up + [-1, 0], # right + [0, -1], # bottom + ]).float() + self.register_buffer( + 'grid_offset', grid_offset[:, None], persistent=False) + + prior_inds = torch.arange(self.num_base_priors).float().view( + self.num_base_priors, 1) + self.register_buffer('prior_inds', prior_inds, persistent=False) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, and objectnesses. + """ + return self.head_module(x) + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted by the head into + bbox results. + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: Object detection results of each image + after the post process. Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). 
+ """ + assert len(cls_scores) == len(bbox_preds) + if objectnesses is None: + with_objectnesses = False + else: + with_objectnesses = True + assert len(cls_scores) == len(objectnesses) + + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + multi_label = cfg.multi_label + multi_label &= self.num_classes > 1 + cfg.multi_label = multi_label + + num_imgs = len(batch_img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size.numel() * self.num_base_priors, ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_decoded_bboxes = self.bbox_coder.decode( + flatten_priors[None], flatten_bbox_preds, flatten_stride) + + if with_objectnesses: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + else: + flatten_objectness = [None for _ in range(num_imgs)] + + results_list = [] + for (bboxes, scores, objectness, + img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores, + flatten_objectness, batch_img_metas): + ori_shape = img_meta['ori_shape'] + scale_factor = img_meta['scale_factor'] + if 'pad_param' in img_meta: + pad_param = img_meta['pad_param'] + else: + pad_param = None + + score_thr = cfg.get('score_thr', -1) + # yolox_style does not require the following operations + if objectness is not None and score_thr > 0 and not cfg.get( + 'yolox_style', False): + conf_inds = objectness > score_thr + bboxes = bboxes[conf_inds, :] + scores = scores[conf_inds, :] + objectness = objectness[conf_inds] + + if objectness is not None: + # conf = obj_conf * cls_conf + scores *= objectness[:, None] + + if scores.shape[0] == 0: + empty_results = InstanceData() + empty_results.bboxes = bboxes + empty_results.scores = scores[:, 0] + empty_results.labels = scores[:, 0].int() + results_list.append(empty_results) + continue + + nms_pre = cfg.get('nms_pre', 100000) + if cfg.multi_label is False: + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs, results = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict(labels=labels[:, 0])) + labels = results['labels'] + else: + scores, labels, keep_idxs, _ = filter_scores_and_topk( + scores, score_thr, nms_pre) + + results = InstanceData( + scores=scores, labels=labels, bboxes=bboxes[keep_idxs]) + + if rescale: + if pad_param is not None: + results.bboxes -= results.bboxes.new_tensor([ + pad_param[2], pad_param[0], pad_param[2], pad_param[0] + ]) + results.bboxes /= results.bboxes.new_tensor( + scale_factor).repeat((1, 2)) + + if cfg.get('yolox_style', False): + # do not need 
max_per_img + cfg.max_per_img = len(results) + + results = self._bbox_post_process( + results=results, + cfg=cfg, + rescale=False, + with_nms=with_nms, + img_meta=img_meta) + results.bboxes[:, 0::2].clamp_(0, ori_shape[1]) + results.bboxes[:, 1::2].clamp_(0, ori_shape[0]) + + results_list.append(results) + return results_list + + def loss(self, x: Tuple[Tensor], batch_data_samples: Union[list, + dict]) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. + """ + + if isinstance(batch_data_samples, list): + losses = super().loss(x, batch_data_samples) + else: + outs = self(x) + # Fast version + loss_inputs = outs + (batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas']) + losses = self.loss_by_feat(*loss_inputs) + + return losses + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_gt_instances (Sequence[InstanceData]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (Sequence[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + if self.ignore_iof_thr != -1: + # TODO: Support fast version + # convert ignore gt + batch_target_ignore_list = [] + for i, gt_instances_ignore in enumerate(batch_gt_instances_ignore): + bboxes = gt_instances_ignore.bboxes + labels = gt_instances_ignore.labels + index = bboxes.new_full((len(bboxes), 1), i) + # (batch_idx, label, bboxes) + target = torch.cat((index, labels[:, None].float(), bboxes), + dim=1) + batch_target_ignore_list.append(target) + + # (num_bboxes, 6) + batch_gt_targets_ignore = torch.cat( + batch_target_ignore_list, dim=0) + if batch_gt_targets_ignore.shape[0] != 0: + # Consider regions with ignore in annotations + return self._loss_by_feat_with_ignore( + cls_scores, + bbox_preds, + objectnesses, + batch_gt_instances=batch_gt_instances, + batch_img_metas=batch_img_metas, + batch_gt_instances_ignore=batch_gt_targets_ignore) + + # 1. 
Convert gt to norm format + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + device = cls_scores[0].device + loss_cls = torch.zeros(1, device=device) + loss_box = torch.zeros(1, device=device) + loss_obj = torch.zeros(1, device=device) + scaled_factor = torch.ones(7, device=device) + + for i in range(self.num_levels): + batch_size, _, h, w = bbox_preds[i].shape + target_obj = torch.zeros_like(objectnesses[i]) + + # empty gt bboxes + if batch_targets_normed.shape[1] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], target_obj) * self.obj_level_weights[i] + continue + + priors_base_sizes_i = self.priors_base_sizes[i] + # feature map scale whwh + scaled_factor[2:6] = torch.tensor( + bbox_preds[i].shape)[[3, 2, 3, 2]] + # Scale batch_targets from range 0-1 to range 0-features_maps size. + # (num_base_priors, num_bboxes, 7) + batch_targets_scaled = batch_targets_normed * scaled_factor + + # 2. Shape match + wh_ratio = batch_targets_scaled[..., + 4:6] / priors_base_sizes_i[:, None] + match_inds = torch.max( + wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr + batch_targets_scaled = batch_targets_scaled[match_inds] + + # no gt bbox matches anchor + if batch_targets_scaled.shape[0] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], target_obj) * self.obj_level_weights[i] + continue + + # 3. Positive samples with additional neighbors + + # check the left, up, right, bottom sides of the + # targets grid, and determine whether assigned + # them as positive samples as well. + batch_targets_cxcy = batch_targets_scaled[:, 2:4] + grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy + left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) & + (batch_targets_cxcy > 1)).T + right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) & + (grid_xy > 1)).T + offset_inds = torch.stack( + (torch.ones_like(left), left, up, right, bottom)) + + batch_targets_scaled = batch_targets_scaled.repeat( + (5, 1, 1))[offset_inds] + retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1], + 1)[offset_inds] + + # prepare pred results and positive sample indexes to + # calculate class loss and bbox lo + _chunk_targets = batch_targets_scaled.chunk(4, 1) + img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets + priors_inds, (img_inds, class_inds) = priors_inds.long().view( + -1), img_class_inds.long().T + + grid_xy_long = (grid_xy - + retained_offsets * self.near_neighbor_thr).long() + grid_x_inds, grid_y_inds = grid_xy_long.T + bboxes_targets = torch.cat((grid_xy - grid_xy_long, grid_wh), 1) + + # 4. 
Calculate loss + # bbox loss + retained_bbox_pred = bbox_preds[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + priors_base_sizes_i = priors_base_sizes_i[priors_inds] + decoded_bbox_pred = self._decode_bbox_to_xywh( + retained_bbox_pred, priors_base_sizes_i) + loss_box_i, iou = self.loss_bbox(decoded_bbox_pred, bboxes_targets) + loss_box += loss_box_i + + # obj loss + iou = iou.detach().clamp(0) + target_obj[img_inds, priors_inds, grid_y_inds, + grid_x_inds] = iou.type(target_obj.dtype) + loss_obj += self.loss_obj(objectnesses[i], + target_obj) * self.obj_level_weights[i] + + # cls loss + if self.num_classes > 1: + pred_cls_scores = cls_scores[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + + target_class = torch.full_like(pred_cls_scores, 0.) + target_class[range(batch_targets_scaled.shape[0]), + class_inds] = 1. + loss_cls += self.loss_cls(pred_cls_scores, target_class) + else: + loss_cls += cls_scores[i].sum() * 0 + + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * batch_size * world_size, + loss_obj=loss_obj * batch_size * world_size, + loss_bbox=loss_box * batch_size * world_size) + + def _convert_gt_to_norm_format(self, + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict]) -> Tensor: + if isinstance(batch_gt_instances, torch.Tensor): + # fast version + img_shape = batch_img_metas[0]['batch_input_shape'] + gt_bboxes_xyxy = batch_gt_instances[:, 2:] + xy1, xy2 = gt_bboxes_xyxy.split((2, 2), dim=-1) + gt_bboxes_xywh = torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1) + gt_bboxes_xywh[:, 1::2] /= img_shape[0] + gt_bboxes_xywh[:, 0::2] /= img_shape[1] + batch_gt_instances[:, 2:] = gt_bboxes_xywh + + # (num_base_priors, num_bboxes, 6) + batch_targets_normed = batch_gt_instances.repeat( + self.num_base_priors, 1, 1) + else: + batch_target_list = [] + # Convert xyxy bbox to yolo format. 
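+            # For example, with a 640x640 ``batch_input_shape`` an xyxy box
+            # (50, 60, 150, 260) becomes the normalised cxcywh target
+            # (0.15625, 0.25, 0.15625, 0.3125).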
+ for i, gt_instances in enumerate(batch_gt_instances): + img_shape = batch_img_metas[i]['batch_input_shape'] + bboxes = gt_instances.bboxes + labels = gt_instances.labels + + xy1, xy2 = bboxes.split((2, 2), dim=-1) + bboxes = torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1) + # normalized to 0-1 + bboxes[:, 1::2] /= img_shape[0] + bboxes[:, 0::2] /= img_shape[1] + + index = bboxes.new_full((len(bboxes), 1), i) + # (batch_idx, label, normed_bbox) + target = torch.cat((index, labels[:, None].float(), bboxes), + dim=1) + batch_target_list.append(target) + + # (num_base_priors, num_bboxes, 6) + batch_targets_normed = torch.cat( + batch_target_list, dim=0).repeat(self.num_base_priors, 1, 1) + + # (num_base_priors, num_bboxes, 1) + batch_targets_prior_inds = self.prior_inds.repeat( + 1, batch_targets_normed.shape[1])[..., None] + # (num_base_priors, num_bboxes, 7) + # (img_ind, labels, bbox_cx, bbox_cy, bbox_w, bbox_h, prior_ind) + batch_targets_normed = torch.cat( + (batch_targets_normed, batch_targets_prior_inds), 2) + return batch_targets_normed + + def _decode_bbox_to_xywh(self, bbox_pred, priors_base_sizes) -> Tensor: + bbox_pred = bbox_pred.sigmoid() + pred_xy = bbox_pred[:, :2] * 2 - 0.5 + pred_wh = (bbox_pred[:, 2:] * 2)**2 * priors_base_sizes + decoded_bbox_pred = torch.cat((pred_xy, pred_wh), dim=-1) + return decoded_bbox_pred + + def _loss_by_feat_with_ignore( + self, cls_scores: Sequence[Tensor], bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: Sequence[Tensor]) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_gt_instances (Sequence[InstanceData]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (Sequence[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (Sequence[Tensor]): Ignore boxes with + batch_ids and labels, each is a 2D-tensor, the channel number + is 6, means that (batch_id, label, xmin, ymin, xmax, ymax). + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + # 1. 
Convert gt to norm format + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + + device = cls_scores[0].device + loss_cls = torch.zeros(1, device=device) + loss_box = torch.zeros(1, device=device) + loss_obj = torch.zeros(1, device=device) + scaled_factor = torch.ones(7, device=device) + + for i in range(self.num_levels): + batch_size, _, h, w = bbox_preds[i].shape + target_obj = torch.zeros_like(objectnesses[i]) + + not_ignore_flags = bbox_preds[i].new_ones(batch_size, + self.num_base_priors, h, + w) + + ignore_overlaps = bbox_overlaps(self.mlvl_priors[i], + batch_gt_instances_ignore[..., 2:], + 'iof') + ignore_max_overlaps, ignore_max_ignore_index = ignore_overlaps.max( + dim=1) + + batch_inds = batch_gt_instances_ignore[:, + 0][ignore_max_ignore_index] + ignore_inds = (ignore_max_overlaps > self.ignore_iof_thr).nonzero( + as_tuple=True)[0] + batch_inds = batch_inds[ignore_inds].long() + ignore_priors, ignore_grid_xs, ignore_grid_ys = get_prior_xy_info( + ignore_inds, self.num_base_priors, self.featmap_sizes[i]) + not_ignore_flags[batch_inds, ignore_priors, ignore_grid_ys, + ignore_grid_xs] = 0 + + # empty gt bboxes + if batch_targets_normed.shape[1] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], + target_obj, + weight=not_ignore_flags, + avg_factor=max(not_ignore_flags.sum(), + 1)) * self.obj_level_weights[i] + continue + + priors_base_sizes_i = self.priors_base_sizes[i] + # feature map scale whwh + scaled_factor[2:6] = torch.tensor( + bbox_preds[i].shape)[[3, 2, 3, 2]] + # Scale batch_targets from range 0-1 to range 0-features_maps size. + # (num_base_priors, num_bboxes, 7) + batch_targets_scaled = batch_targets_normed * scaled_factor + + # 2. Shape match + wh_ratio = batch_targets_scaled[..., + 4:6] / priors_base_sizes_i[:, None] + match_inds = torch.max( + wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr + batch_targets_scaled = batch_targets_scaled[match_inds] + + # no gt bbox matches anchor + if batch_targets_scaled.shape[0] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], + target_obj, + weight=not_ignore_flags, + avg_factor=max(not_ignore_flags.sum(), + 1)) * self.obj_level_weights[i] + continue + + # 3. Positive samples with additional neighbors + + # check the left, up, right, bottom sides of the + # targets grid, and determine whether assigned + # them as positive samples as well. 
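+            # Worked illustration (added, not part of upstream mmyolo): with
+            # near_neighbor_thr = 0.5, a target centered at grid coordinates
+            # (13.3, 7.8) keeps its own cell (13, 7), additionally marks the
+            # left neighbor (12, 7) because frac(13.3) = 0.3 < 0.5, and the
+            # cell below, (13, 8), because frac(7.8) = 0.8 > 0.5. Each target
+            # therefore ends up with at most 3 positive cells per level.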
+ batch_targets_cxcy = batch_targets_scaled[:, 2:4] + grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy + left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) & + (batch_targets_cxcy > 1)).T + right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) & + (grid_xy > 1)).T + offset_inds = torch.stack( + (torch.ones_like(left), left, up, right, bottom)) + + batch_targets_scaled = batch_targets_scaled.repeat( + (5, 1, 1))[offset_inds] + retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1], + 1)[offset_inds] + + # prepare pred results and positive sample indexes to + # calculate class loss and bbox lo + _chunk_targets = batch_targets_scaled.chunk(4, 1) + img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets + priors_inds, (img_inds, class_inds) = priors_inds.long().view( + -1), img_class_inds.long().T + + grid_xy_long = (grid_xy - + retained_offsets * self.near_neighbor_thr).long() + grid_x_inds, grid_y_inds = grid_xy_long.T + bboxes_targets = torch.cat((grid_xy - grid_xy_long, grid_wh), 1) + + # 4. Calculate loss + # bbox loss + retained_bbox_pred = bbox_preds[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + priors_base_sizes_i = priors_base_sizes_i[priors_inds] + decoded_bbox_pred = self._decode_bbox_to_xywh( + retained_bbox_pred, priors_base_sizes_i) + + not_ignore_weights = not_ignore_flags[img_inds, priors_inds, + grid_y_inds, grid_x_inds] + loss_box_i, iou = self.loss_bbox( + decoded_bbox_pred, + bboxes_targets, + weight=not_ignore_weights, + avg_factor=max(not_ignore_weights.sum(), 1)) + loss_box += loss_box_i + + # obj loss + iou = iou.detach().clamp(0) + target_obj[img_inds, priors_inds, grid_y_inds, + grid_x_inds] = iou.type(target_obj.dtype) + loss_obj += self.loss_obj( + objectnesses[i], + target_obj, + weight=not_ignore_flags, + avg_factor=max(not_ignore_flags.sum(), + 1)) * self.obj_level_weights[i] + + # cls loss + if self.num_classes > 1: + pred_cls_scores = cls_scores[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + + target_class = torch.full_like(pred_cls_scores, 0.) + target_class[range(batch_targets_scaled.shape[0]), + class_inds] = 1. + loss_cls += self.loss_cls( + pred_cls_scores, + target_class, + weight=not_ignore_weights[:, None].repeat( + 1, self.num_classes), + avg_factor=max(not_ignore_weights.sum(), 1)) + else: + loss_cls += cls_scores[i].sum() * 0 + + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * batch_size * world_size, + loss_obj=loss_obj * batch_size * world_size, + loss_bbox=loss_box * batch_size * world_size) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_ins_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_ins_head.py new file mode 100644 index 0000000000000000000000000000000000000000..df94f422e904791252067e22ea8e3a643a77a8d0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov5_ins_head.py @@ -0,0 +1,740 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
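+# Readability note (added, not part of upstream mmyolo): this head follows the
+# YOLACT-style prototype-mask formulation also used by YOLOv5-seg. Each
+# positive prediction carries `mask_channels` coefficients c, the ProtoModule
+# produces a shared prototype tensor P of shape (C, H/4, W/4), and an instance
+# mask is roughly sigmoid(c @ P.view(C, -1)).view(H/4, W/4), cropped to the
+# predicted box (see `process_mask` and `crop_mask` below).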
+import copy +from typing import List, Optional, Sequence, Tuple, Union + +import mmcv +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmdet.models.utils import filter_scores_and_topk, multi_apply +from mmdet.structures.bbox import bbox_cxcywh_to_xyxy +from mmdet.utils import ConfigType, OptInstanceList +from mmengine.config import ConfigDict +from mmengine.dist import get_dist_info +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS +from ..utils import make_divisible +from .yolov5_head import YOLOv5Head, YOLOv5HeadModule + + +class ProtoModule(BaseModule): + """Mask Proto module for segmentation models of YOLOv5. + + Args: + in_channels (int): Number of channels in the input feature map. + middle_channels (int): Number of channels in the middle feature map. + mask_channels (int): Number of channels in the output mask feature + map. This is the channel count of the mask. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to ``dict(type='BN', momentum=0.03, eps=0.001)``. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Default: dict(type='SiLU', inplace=True). + """ + + def __init__(self, + *args, + in_channels: int = 32, + middle_channels: int = 256, + mask_channels: int = 32, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + **kwargs): + super().__init__(*args, **kwargs) + self.conv1 = ConvModule( + in_channels, + middle_channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.conv2 = ConvModule( + middle_channels, + middle_channels, + kernel_size=3, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv3 = ConvModule( + middle_channels, + mask_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: Tensor) -> Tensor: + return self.conv3(self.conv2(self.upsample(self.conv1(x)))) + + +@MODELS.register_module() +class YOLOv5InsHeadModule(YOLOv5HeadModule): + """Detection and Instance Segmentation Head of YOLOv5. + + Args: + num_classes (int): Number of categories excluding the background + category. + mask_channels (int): Number of channels in the mask feature map. + This is the channel count of the mask. + proto_channels (int): Number of channels in the proto feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to ``dict(type='BN', momentum=0.03, eps=0.001)``. + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Default: dict(type='SiLU', inplace=True). 
+ """ + + def __init__(self, + *args, + num_classes: int, + mask_channels: int = 32, + proto_channels: int = 256, + widen_factor: float = 1.0, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + **kwargs): + self.mask_channels = mask_channels + self.num_out_attrib_with_proto = 5 + num_classes + mask_channels + self.proto_channels = make_divisible(proto_channels, widen_factor) + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + super().__init__( + *args, + num_classes=num_classes, + widen_factor=widen_factor, + **kwargs) + + def _init_layers(self): + """initialize conv layers in YOLOv5 Ins head.""" + self.convs_pred = nn.ModuleList() + for i in range(self.num_levels): + conv_pred = nn.Conv2d( + self.in_channels[i], + self.num_base_priors * self.num_out_attrib_with_proto, 1) + self.convs_pred.append(conv_pred) + + self.proto_pred = ProtoModule( + in_channels=self.in_channels[0], + middle_channels=self.proto_channels, + mask_channels=self.mask_channels, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, objectnesses, and mask predictions. + """ + assert len(x) == self.num_levels + cls_scores, bbox_preds, objectnesses, coeff_preds = multi_apply( + self.forward_single, x, self.convs_pred) + mask_protos = self.proto_pred(x[0]) + return cls_scores, bbox_preds, objectnesses, coeff_preds, mask_protos + + def forward_single( + self, x: Tensor, + convs_pred: nn.Module) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Forward feature of a single scale level.""" + + pred_map = convs_pred(x) + bs, _, ny, nx = pred_map.shape + pred_map = pred_map.view(bs, self.num_base_priors, + self.num_out_attrib_with_proto, ny, nx) + + cls_score = pred_map[:, :, 5:self.num_classes + 5, + ...].reshape(bs, -1, ny, nx) + bbox_pred = pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx) + objectness = pred_map[:, :, 4:5, ...].reshape(bs, -1, ny, nx) + coeff_pred = pred_map[:, :, self.num_classes + 5:, + ...].reshape(bs, -1, ny, nx) + + return cls_score, bbox_pred, objectness, coeff_pred + + +@MODELS.register_module() +class YOLOv5InsHead(YOLOv5Head): + """YOLOv5 Instance Segmentation and Detection head. + + Args: + mask_overlap(bool): Defaults to True. + loss_mask (:obj:`ConfigDict` or dict): Config of mask loss. + loss_mask_weight (float): The weight of mask loss. + """ + + def __init__(self, + *args, + mask_overlap: bool = True, + loss_mask: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=0.05, + **kwargs): + super().__init__(*args, **kwargs) + self.mask_overlap = mask_overlap + self.loss_mask: nn.Module = MODELS.build(loss_mask) + self.loss_mask_weight = loss_mask_weight + + def loss(self, x: Tuple[Tensor], batch_data_samples: Union[list, + dict]) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + + Returns: + dict: A dictionary of loss components. 
+ """ + + if isinstance(batch_data_samples, list): + # TODO: support non-fast version ins segmention + raise NotImplementedError + else: + outs = self(x) + # Fast version + loss_inputs = outs + (batch_data_samples['bboxes_labels'], + batch_data_samples['masks'], + batch_data_samples['img_metas']) + losses = self.loss_by_feat(*loss_inputs) + + return losses + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + coeff_preds: Sequence[Tensor], + proto_preds: Tensor, + batch_gt_instances: Sequence[InstanceData], + batch_gt_masks: Sequence[Tensor], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + coeff_preds (Sequence[Tensor]): Mask coefficient for each scale + level, each is a 4D-tensor, the channel number is + num_priors * mask_channels. + proto_preds (Tensor): Mask prototype features extracted from the + mask head, has shape (batch_size, mask_channels, H, W). + batch_gt_instances (Sequence[InstanceData]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_gt_masks (Sequence[Tensor]): Batch of gt_mask. + batch_img_metas (Sequence[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + # 1. Convert gt to norm format + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + device = cls_scores[0].device + loss_cls = torch.zeros(1, device=device) + loss_box = torch.zeros(1, device=device) + loss_obj = torch.zeros(1, device=device) + loss_mask = torch.zeros(1, device=device) + scaled_factor = torch.ones(8, device=device) + + for i in range(self.num_levels): + batch_size, _, h, w = bbox_preds[i].shape + target_obj = torch.zeros_like(objectnesses[i]) + + # empty gt bboxes + if batch_targets_normed.shape[1] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], target_obj) * self.obj_level_weights[i] + loss_mask += coeff_preds[i].sum() * 0 + continue + + priors_base_sizes_i = self.priors_base_sizes[i] + # feature map scale whwh + scaled_factor[2:6] = torch.tensor( + bbox_preds[i].shape)[[3, 2, 3, 2]] + # Scale batch_targets from range 0-1 to range 0-features_maps size. + # (num_base_priors, num_bboxes, 8) + batch_targets_scaled = batch_targets_normed * scaled_factor + + # 2. 
Shape match + wh_ratio = batch_targets_scaled[..., + 4:6] / priors_base_sizes_i[:, None] + match_inds = torch.max( + wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr + batch_targets_scaled = batch_targets_scaled[match_inds] + + # no gt bbox matches anchor + if batch_targets_scaled.shape[0] == 0: + loss_box += bbox_preds[i].sum() * 0 + loss_cls += cls_scores[i].sum() * 0 + loss_obj += self.loss_obj( + objectnesses[i], target_obj) * self.obj_level_weights[i] + loss_mask += coeff_preds[i].sum() * 0 + continue + + # 3. Positive samples with additional neighbors + + # check the left, up, right, bottom sides of the + # targets grid, and determine whether assigned + # them as positive samples as well. + batch_targets_cxcy = batch_targets_scaled[:, 2:4] + grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy + left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) & + (batch_targets_cxcy > 1)).T + right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) & + (grid_xy > 1)).T + offset_inds = torch.stack( + (torch.ones_like(left), left, up, right, bottom)) + + batch_targets_scaled = batch_targets_scaled.repeat( + (5, 1, 1))[offset_inds] + retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1], + 1)[offset_inds] + + # prepare pred results and positive sample indexes to + # calculate class loss and bbox lo + _chunk_targets = batch_targets_scaled.chunk(4, 1) + img_class_inds, grid_xy, grid_wh,\ + priors_targets_inds = _chunk_targets + (priors_inds, targets_inds) = priors_targets_inds.long().T + (img_inds, class_inds) = img_class_inds.long().T + + grid_xy_long = (grid_xy - + retained_offsets * self.near_neighbor_thr).long() + grid_x_inds, grid_y_inds = grid_xy_long.T + bboxes_targets = torch.cat((grid_xy - grid_xy_long, grid_wh), 1) + + # 4. Calculate loss + # bbox loss + retained_bbox_pred = bbox_preds[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + priors_base_sizes_i = priors_base_sizes_i[priors_inds] + decoded_bbox_pred = self._decode_bbox_to_xywh( + retained_bbox_pred, priors_base_sizes_i) + loss_box_i, iou = self.loss_bbox(decoded_bbox_pred, bboxes_targets) + loss_box += loss_box_i + + # obj loss + iou = iou.detach().clamp(0) + target_obj[img_inds, priors_inds, grid_y_inds, + grid_x_inds] = iou.type(target_obj.dtype) + loss_obj += self.loss_obj(objectnesses[i], + target_obj) * self.obj_level_weights[i] + + # cls loss + if self.num_classes > 1: + pred_cls_scores = cls_scores[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + + target_class = torch.full_like(pred_cls_scores, 0.) + target_class[range(batch_targets_scaled.shape[0]), + class_inds] = 1. 
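+                # Added note (not in upstream mmyolo): target_class is a
+                # per-prediction one-hot matrix over classes; with the default
+                # YOLOv5 configs loss_cls is a sigmoid (BCE-style) loss, so
+                # each class is supervised independently for every retained
+                # positive sample.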
+ loss_cls += self.loss_cls(pred_cls_scores, target_class) + else: + loss_cls += cls_scores[i].sum() * 0 + + # mask regression + retained_coeff_preds = coeff_preds[i].reshape( + batch_size, self.num_base_priors, -1, h, + w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds] + + _, c, mask_h, mask_w = proto_preds.shape + if batch_gt_masks.shape[-2:] != (mask_h, mask_w): + batch_gt_masks = F.interpolate( + batch_gt_masks[None], (mask_h, mask_w), mode='nearest')[0] + + xywh_normed = batch_targets_scaled[:, 2:6] / scaled_factor[2:6] + area_normed = xywh_normed[:, 2:].prod(1) + xywh_scaled = xywh_normed * torch.tensor( + proto_preds.shape, device=device)[[3, 2, 3, 2]] + xyxy_scaled = bbox_cxcywh_to_xyxy(xywh_scaled) + + for bs in range(batch_size): + match_inds = (img_inds == bs) # matching index + if not match_inds.any(): + continue + + if self.mask_overlap: + mask_gti = torch.where( + batch_gt_masks[bs][None] == + targets_inds[match_inds].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = batch_gt_masks[targets_inds][match_inds] + + mask_preds = (retained_coeff_preds[match_inds] + @ proto_preds[bs].view(c, -1)).view( + -1, mask_h, mask_w) + loss_mask_full = self.loss_mask(mask_preds, mask_gti) + loss_mask += ( + self.crop_mask(loss_mask_full[None], + xyxy_scaled[match_inds]).mean(dim=(2, 3)) / + area_normed[match_inds]).mean() + + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * batch_size * world_size, + loss_obj=loss_obj * batch_size * world_size, + loss_bbox=loss_box * batch_size * world_size, + loss_mask=loss_mask * self.loss_mask_weight * world_size) + + def _convert_gt_to_norm_format(self, + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict]) -> Tensor: + """Add target_inds for instance segmentation.""" + batch_targets_normed = super()._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + if self.mask_overlap: + batch_size = len(batch_img_metas) + target_inds = [] + for i in range(batch_size): + # find number of targets of each image + num_gts = (batch_gt_instances[:, 0] == i).sum() + # (num_anchor, num_gts) + target_inds.append( + torch.arange(num_gts, device=batch_gt_instances.device). + float().view(1, num_gts).repeat(self.num_base_priors, 1) + + 1) + target_inds = torch.cat(target_inds, 1) + else: + num_gts = batch_gt_instances.shape[0] + target_inds = torch.arange( + num_gts, device=batch_gt_instances.device).float().view( + 1, num_gts).repeat(self.num_base_priors, 1) + batch_targets_normed = torch.cat( + [batch_targets_normed, target_inds[..., None]], 2) + return batch_targets_normed + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + coeff_preds: Optional[List[Tensor]] = None, + proto_preds: Optional[Tensor] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted from the head into + bbox results. + Note: When score_factors is not None, the cls_scores are + usually multiplied by it then obtain the real score used in NMS. + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). 
+ objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + coeff_preds (list[Tensor]): Mask coefficients predictions + for all scale levels, each is a 4D-tensor, has shape + (batch_size, mask_channels, H, W). + proto_preds (Tensor): Mask prototype features extracted from the + mask head, has shape (batch_size, mask_channels, H, W). + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + Returns: + list[:obj:`InstanceData`]: Object detection and instance + segmentation results of each image after the post process. + Each item usually contains following keys. + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). + - masks (Tensor): Has a shape (num_instances, h, w). + """ + assert len(cls_scores) == len(bbox_preds) == len(coeff_preds) + if objectnesses is None: + with_objectnesses = False + else: + with_objectnesses = True + assert len(cls_scores) == len(objectnesses) + + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + multi_label = cfg.multi_label + multi_label &= self.num_classes > 1 + cfg.multi_label = multi_label + + num_imgs = len(batch_img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size.numel() * self.num_base_priors, ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_coeff_preds = [ + coeff_pred.permute(0, 2, 3, + 1).reshape(num_imgs, -1, + self.head_module.mask_channels) + for coeff_pred in coeff_preds + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_decoded_bboxes = self.bbox_coder.decode( + flatten_priors.unsqueeze(0), flatten_bbox_preds, flatten_stride) + + flatten_coeff_preds = torch.cat(flatten_coeff_preds, dim=1) + + if with_objectnesses: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + else: + flatten_objectness = [None for _ in range(len(featmap_sizes))] + + results_list = [] + for (bboxes, scores, objectness, coeffs, mask_proto, + img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores, + flatten_objectness, flatten_coeff_preds, + proto_preds, 
batch_img_metas): + ori_shape = img_meta['ori_shape'] + batch_input_shape = img_meta['batch_input_shape'] + input_shape_h, input_shape_w = batch_input_shape + if 'pad_param' in img_meta: + pad_param = img_meta['pad_param'] + input_shape_withoutpad = (input_shape_h - pad_param[0] - + pad_param[1], input_shape_w - + pad_param[2] - pad_param[3]) + else: + pad_param = None + input_shape_withoutpad = batch_input_shape + scale_factor = (input_shape_withoutpad[1] / ori_shape[1], + input_shape_withoutpad[0] / ori_shape[0]) + + score_thr = cfg.get('score_thr', -1) + # yolox_style does not require the following operations + if objectness is not None and score_thr > 0 and not cfg.get( + 'yolox_style', False): + conf_inds = objectness > score_thr + bboxes = bboxes[conf_inds, :] + scores = scores[conf_inds, :] + objectness = objectness[conf_inds] + coeffs = coeffs[conf_inds] + + if objectness is not None: + # conf = obj_conf * cls_conf + scores *= objectness[:, None] + # NOTE: Important + coeffs *= objectness[:, None] + + if scores.shape[0] == 0: + empty_results = InstanceData() + empty_results.bboxes = bboxes + empty_results.scores = scores[:, 0] + empty_results.labels = scores[:, 0].int() + h, w = ori_shape[:2] if rescale else img_meta['img_shape'][:2] + empty_results.masks = torch.zeros( + size=(0, h, w), dtype=torch.bool, device=bboxes.device) + results_list.append(empty_results) + continue + + nms_pre = cfg.get('nms_pre', 100000) + if cfg.multi_label is False: + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs, results = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict(labels=labels[:, 0], coeffs=coeffs)) + labels = results['labels'] + coeffs = results['coeffs'] + else: + out = filter_scores_and_topk( + scores, score_thr, nms_pre, results=dict(coeffs=coeffs)) + scores, labels, keep_idxs, filtered_results = out + coeffs = filtered_results['coeffs'] + + results = InstanceData( + scores=scores, + labels=labels, + bboxes=bboxes[keep_idxs], + coeffs=coeffs) + + if cfg.get('yolox_style', False): + # do not need max_per_img + cfg.max_per_img = len(results) + + results = self._bbox_post_process( + results=results, + cfg=cfg, + rescale=False, + with_nms=with_nms, + img_meta=img_meta) + + if len(results.bboxes): + masks = self.process_mask(mask_proto, results.coeffs, + results.bboxes, + (input_shape_h, input_shape_w), True) + if rescale: + if pad_param is not None: + # bbox minus pad param + top_pad, _, left_pad, _ = pad_param + results.bboxes -= results.bboxes.new_tensor( + [left_pad, top_pad, left_pad, top_pad]) + # mask crop pad param + top, left = int(top_pad), int(left_pad) + bottom, right = int(input_shape_h - + top_pad), int(input_shape_w - + left_pad) + masks = masks[:, :, top:bottom, left:right] + results.bboxes /= results.bboxes.new_tensor( + scale_factor).repeat((1, 2)) + + fast_test = cfg.get('fast_test', False) + if fast_test: + masks = F.interpolate( + masks, + size=ori_shape, + mode='bilinear', + align_corners=False) + masks = masks.squeeze(0) + masks = masks > cfg.mask_thr_binary + else: + masks.gt_(cfg.mask_thr_binary) + masks = torch.as_tensor(masks, dtype=torch.uint8) + masks = masks[0].permute(1, 2, + 0).contiguous().cpu().numpy() + masks = mmcv.imresize(masks, + (ori_shape[1], ori_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + masks = torch.from_numpy(masks).permute(2, 0, 1) + + results.bboxes[:, 0::2].clamp_(0, ori_shape[1]) + results.bboxes[:, 1::2].clamp_(0, ori_shape[0]) + + results.masks = masks.bool() + 
results_list.append(results) + else: + h, w = ori_shape[:2] if rescale else img_meta['img_shape'][:2] + results.masks = torch.zeros( + size=(0, h, w), dtype=torch.bool, device=bboxes.device) + results_list.append(results) + return results_list + + def process_mask(self, + mask_proto: Tensor, + mask_coeff_pred: Tensor, + bboxes: Tensor, + shape: Tuple[int, int], + upsample: bool = False) -> Tensor: + """Generate mask logits results. + + Args: + mask_proto (Tensor): Mask prototype features. + Has shape (num_instance, mask_channels). + mask_coeff_pred (Tensor): Mask coefficients prediction for + single image. Has shape (mask_channels, H, W) + bboxes (Tensor): Tensor of the bbox. Has shape (num_instance, 4). + shape (Tuple): Batch input shape of image. + upsample (bool): Whether upsample masks results to batch input + shape. Default to False. + Return: + Tensor: Instance segmentation masks for each instance. + Has shape (num_instance, H, W). + """ + c, mh, mw = mask_proto.shape # CHW + masks = ( + mask_coeff_pred @ mask_proto.float().view(c, -1)).sigmoid().view( + -1, mh, mw)[None] + if upsample: + masks = F.interpolate( + masks, shape, mode='bilinear', align_corners=False) # 1CHW + masks = self.crop_mask(masks, bboxes) + return masks + + def crop_mask(self, masks: Tensor, boxes: Tensor) -> Tensor: + """Crop mask by the bounding box. + + Args: + masks (Tensor): Predicted mask results. Has shape + (1, num_instance, H, W). + boxes (Tensor): Tensor of the bbox. Has shape (num_instance, 4). + Returns: + (torch.Tensor): The masks are being cropped to the bounding box. + """ + _, n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) + r = torch.arange( + w, device=masks.device, + dtype=x1.dtype)[None, None, None, :] # rows shape(1, 1, w, 1) + c = torch.arange( + h, device=masks.device, + dtype=x1.dtype)[None, None, :, None] # cols shape(1, h, 1, 1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov6_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov6_head.py new file mode 100644 index 0000000000000000000000000000000000000000..3b01133f04f467de9beab08ac9bae602d4588a96 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov6_head.py @@ -0,0 +1,396 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig) +from mmengine import MessageHub +from mmengine.dist import get_dist_info +from mmengine.model import BaseModule, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from ..utils import gt_instances_preprocess +from .yolov5_head import YOLOv5Head + + +@MODELS.register_module() +class YOLOv6HeadModule(BaseModule): + """YOLOv6Head head module used in `YOLOv6. + + `_. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (Union[int, Sequence]): Number of channels in the input + feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors: (int): The number of priors (points) at a point + on the feature grid. 
+ featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to [8, 16, 32]. + None, otherwise False. Defaults to "auto". + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: Union[int, Sequence], + widen_factor: float = 1.0, + num_base_priors: int = 1, + reg_max=0, + featmap_strides: Sequence[int] = (8, 16, 32), + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + + self.num_classes = num_classes + self.featmap_strides = featmap_strides + self.num_levels = len(self.featmap_strides) + self.num_base_priors = num_base_priors + self.reg_max = reg_max + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + if isinstance(in_channels, int): + self.in_channels = [int(in_channels * widen_factor) + ] * self.num_levels + else: + self.in_channels = [int(i * widen_factor) for i in in_channels] + + self._init_layers() + + def _init_layers(self): + """initialize conv layers in YOLOv6 head.""" + # Init decouple head + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + self.cls_preds = nn.ModuleList() + self.reg_preds = nn.ModuleList() + self.stems = nn.ModuleList() + + if self.reg_max > 1: + proj = torch.arange( + self.reg_max + self.num_base_priors, dtype=torch.float) + self.register_buffer('proj', proj, persistent=False) + + for i in range(self.num_levels): + self.stems.append( + ConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i], + kernel_size=1, + stride=1, + padding=1 // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.cls_convs.append( + ConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i], + kernel_size=3, + stride=1, + padding=3 // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.reg_convs.append( + ConvModule( + in_channels=self.in_channels[i], + out_channels=self.in_channels[i], + kernel_size=3, + stride=1, + padding=3 // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.cls_preds.append( + nn.Conv2d( + in_channels=self.in_channels[i], + out_channels=self.num_base_priors * self.num_classes, + kernel_size=1)) + self.reg_preds.append( + nn.Conv2d( + in_channels=self.in_channels[i], + out_channels=(self.num_base_priors + self.reg_max) * 4, + kernel_size=1)) + + def init_weights(self): + super().init_weights() + bias_init = bias_init_with_prob(0.01) + for conv in self.cls_preds: + conv.bias.data.fill_(bias_init) + conv.weight.data.fill_(0.) + + for conv in self.reg_preds: + conv.bias.data.fill_(1.0) + conv.weight.data.fill_(0.) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions. 
+ """ + assert len(x) == self.num_levels + return multi_apply(self.forward_single, x, self.stems, self.cls_convs, + self.cls_preds, self.reg_convs, self.reg_preds) + + def forward_single(self, x: Tensor, stem: nn.Module, cls_conv: nn.Module, + cls_pred: nn.Module, reg_conv: nn.Module, + reg_pred: nn.Module) -> Tuple[Tensor, Tensor]: + """Forward feature of a single scale level.""" + b, _, h, w = x.shape + y = stem(x) + cls_x = y + reg_x = y + cls_feat = cls_conv(cls_x) + reg_feat = reg_conv(reg_x) + + cls_score = cls_pred(cls_feat) + bbox_dist_preds = reg_pred(reg_feat) + + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max + self.num_base_priors, + h * w]).permute(0, 3, 1, 2) + + # TODO: The get_flops script cannot handle the situation of + # matmul, and needs to be fixed later + # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + + if self.training: + return cls_score, bbox_preds, bbox_dist_preds + else: + return cls_score, bbox_preds + + +@MODELS.register_module() +class YOLOv6Head(YOLOv5Head): + """YOLOv6Head head used in `YOLOv6 `_. + + Args: + head_module(ConfigType): Base module used for YOLOv6Head + prior_generator(dict): Points generator feature maps + in 2D points-based detectors. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0.5, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + reduction='sum', + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='IoULoss', + iou_mode='giou', + bbox_format='xyxy', + reduction='mean', + loss_weight=2.5, + return_iou=False), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + # yolov6 doesn't need loss_obj + self.loss_obj = None + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. 
+ """ + if self.train_cfg: + self.initial_epoch = self.train_cfg['initial_epoch'] + self.initial_assigner = TASK_UTILS.build( + self.train_cfg.initial_assigner) + self.assigner = TASK_UTILS.build(self.train_cfg.assigner) + + # Add common attributes to reduce calculation + self.featmap_sizes_train = None + self.num_level_priors = None + self.flatten_priors_train = None + self.stride_tensor = None + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + bbox_dist_preds: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + + # get epoch information from message hub + message_hub = MessageHub.get_current_instance() + current_epoch = message_hub.get_info('epoch') + + num_imgs = len(batch_img_metas) + if batch_gt_instances_ignore is None: + batch_gt_instances_ignore = [None] * num_imgs + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[:, 0]) + pred_scores = torch.sigmoid(flatten_cls_preds) + + if current_epoch < self.initial_epoch: + assigned_result = self.initial_assigner( + flatten_pred_bboxes.detach(), self.flatten_priors_train, + self.num_level_priors, gt_labels, gt_bboxes, pad_bbox_flag) + else: + assigned_result = self.assigner(flatten_pred_bboxes.detach(), + pred_scores.detach(), + self.flatten_priors_train, + 
gt_labels, gt_bboxes, + pad_bbox_flag) + + assigned_bboxes = assigned_result['assigned_bboxes'] + assigned_scores = assigned_result['assigned_scores'] + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'] + + # cls loss + with torch.cuda.amp.autocast(enabled=False): + loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores) + + # rescale bbox + assigned_bboxes /= self.stride_tensor + flatten_pred_bboxes /= self.stride_tensor + + # TODO: Add all_reduce makes training more stable + assigned_scores_sum = assigned_scores.sum() + if assigned_scores_sum > 0: + loss_cls /= assigned_scores_sum + + # select positive samples mask + num_pos = fg_mask_pre_prior.sum() + if num_pos > 0: + # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox + # will not report an error + # iou loss + prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4]) + pred_bboxes_pos = torch.masked_select( + flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4]) + assigned_bboxes_pos = torch.masked_select( + assigned_bboxes, prior_bbox_mask).reshape([-1, 4]) + bbox_weight = torch.masked_select( + assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1) + loss_bbox = self.loss_bbox( + pred_bboxes_pos, + assigned_bboxes_pos, + weight=bbox_weight, + avg_factor=assigned_scores_sum) + else: + loss_bbox = flatten_pred_bboxes.sum() * 0 + + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * world_size, loss_bbox=loss_bbox * world_size) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov7_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov7_head.py new file mode 100644 index 0000000000000000000000000000000000000000..124883cf4b4c5b51d6643edc7c2f813178d80c78 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov7_head.py @@ -0,0 +1,404 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
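+# Readability note (added, not part of upstream mmyolo): compared to the
+# YOLOv5 head, this variant wraps each prediction conv with ImplicitA /
+# ImplicitM (learned additive / multiplicative "implicit knowledge" terms from
+# the YOLOR/YOLOv7 line of work) and moves target assignment into
+# BatchYOLOv7Assigner, which refines the YOLOv5-style candidate matches with a
+# SimOTA-style dynamic top-k selection driven by a weighted IoU +
+# classification cost.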
+import math +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmdet.utils import ConfigType, OptInstanceList +from mmengine.dist import get_dist_info +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS +from ..layers import ImplicitA, ImplicitM +from ..task_modules.assigners.batch_yolov7_assigner import BatchYOLOv7Assigner +from .yolov5_head import YOLOv5Head, YOLOv5HeadModule + + +@MODELS.register_module() +class YOLOv7HeadModule(YOLOv5HeadModule): + """YOLOv7Head head module used in YOLOv7.""" + + def _init_layers(self): + """initialize conv layers in YOLOv7 head.""" + self.convs_pred = nn.ModuleList() + for i in range(self.num_levels): + conv_pred = nn.Sequential( + ImplicitA(self.in_channels[i]), + nn.Conv2d(self.in_channels[i], + self.num_base_priors * self.num_out_attrib, 1), + ImplicitM(self.num_base_priors * self.num_out_attrib), + ) + self.convs_pred.append(conv_pred) + + def init_weights(self): + """Initialize the bias of YOLOv7 head.""" + super(YOLOv5HeadModule, self).init_weights() + for mi, s in zip(self.convs_pred, self.featmap_strides): # from + mi = mi[1] # nn.Conv2d + + b = mi.bias.data.view(self.num_base_priors, -1) + # obj (8 objects per 640 image) + b.data[:, 4] += math.log(8 / (640 / s)**2) + b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.99)) + + mi.bias.data = b.view(-1) + + +@MODELS.register_module() +class YOLOv7p6HeadModule(YOLOv5HeadModule): + """YOLOv7Head head module used in YOLOv7.""" + + def __init__(self, + *args, + main_out_channels: Sequence[int] = [256, 512, 768, 1024], + aux_out_channels: Sequence[int] = [320, 640, 960, 1280], + use_aux: bool = True, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + **kwargs): + self.main_out_channels = main_out_channels + self.aux_out_channels = aux_out_channels + self.use_aux = use_aux + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + super().__init__(*args, **kwargs) + + def _init_layers(self): + """initialize conv layers in YOLOv7 head.""" + self.main_convs_pred = nn.ModuleList() + for i in range(self.num_levels): + conv_pred = nn.Sequential( + ConvModule( + self.in_channels[i], + self.main_out_channels[i], + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ImplicitA(self.main_out_channels[i]), + nn.Conv2d(self.main_out_channels[i], + self.num_base_priors * self.num_out_attrib, 1), + ImplicitM(self.num_base_priors * self.num_out_attrib), + ) + self.main_convs_pred.append(conv_pred) + + if self.use_aux: + self.aux_convs_pred = nn.ModuleList() + for i in range(self.num_levels): + aux_pred = nn.Sequential( + ConvModule( + self.in_channels[i], + self.aux_out_channels[i], + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(self.aux_out_channels[i], + self.num_base_priors * self.num_out_attrib, 1)) + self.aux_convs_pred.append(aux_pred) + else: + self.aux_convs_pred = [None] * len(self.main_convs_pred) + + def init_weights(self): + """Initialize the bias of YOLOv5 head.""" + super(YOLOv5HeadModule, self).init_weights() + for mi, aux, s in zip(self.main_convs_pred, self.aux_convs_pred, + self.featmap_strides): # from + mi = mi[2] # nn.Conv2d + b = mi.bias.data.view(3, -1) + # obj (8 objects per 640 image) + b.data[:, 4] += math.log(8 / (640 / s)**2) + b.data[:, 5:] += math.log(0.6 / (self.num_classes 
- 0.99)) + mi.bias.data = b.view(-1) + + if self.use_aux: + aux = aux[1] # nn.Conv2d + b = aux.bias.data.view(3, -1) + # obj (8 objects per 640 image) + b.data[:, 4] += math.log(8 / (640 / s)**2) + b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.99)) + mi.bias.data = b.view(-1) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, and objectnesses. + """ + assert len(x) == self.num_levels + return multi_apply(self.forward_single, x, self.main_convs_pred, + self.aux_convs_pred) + + def forward_single(self, x: Tensor, convs: nn.Module, + aux_convs: Optional[nn.Module]) \ + -> Tuple[Union[Tensor, List], Union[Tensor, List], + Union[Tensor, List]]: + """Forward feature of a single scale level.""" + + pred_map = convs(x) + bs, _, ny, nx = pred_map.shape + pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib, + ny, nx) + + cls_score = pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx) + bbox_pred = pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx) + objectness = pred_map[:, :, 4:5, ...].reshape(bs, -1, ny, nx) + + if not self.training or not self.use_aux: + return cls_score, bbox_pred, objectness + else: + aux_pred_map = aux_convs(x) + aux_pred_map = aux_pred_map.view(bs, self.num_base_priors, + self.num_out_attrib, ny, nx) + aux_cls_score = aux_pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx) + aux_bbox_pred = aux_pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx) + aux_objectness = aux_pred_map[:, :, 4:5, + ...].reshape(bs, -1, ny, nx) + + return [cls_score, + aux_cls_score], [bbox_pred, aux_bbox_pred + ], [objectness, aux_objectness] + + +@MODELS.register_module() +class YOLOv7Head(YOLOv5Head): + """YOLOv7Head head used in `YOLOv7 `_. + + Args: + simota_candidate_topk (int): The candidate top-k which used to + get top-k ious to calculate dynamic-k in BatchYOLOv7Assigner. + Defaults to 10. + simota_iou_weight (float): The scale factor for regression + iou cost in BatchYOLOv7Assigner. Defaults to 3.0. + simota_cls_weight (float): The scale factor for classification + cost in BatchYOLOv7Assigner. Defaults to 1.0. + """ + + def __init__(self, + *args, + simota_candidate_topk: int = 20, + simota_iou_weight: float = 3.0, + simota_cls_weight: float = 1.0, + aux_loss_weights: float = 0.25, + **kwargs): + super().__init__(*args, **kwargs) + self.aux_loss_weights = aux_loss_weights + self.assigner = BatchYOLOv7Assigner( + num_classes=self.num_classes, + num_base_priors=self.num_base_priors, + featmap_strides=self.featmap_strides, + prior_match_thr=self.prior_match_thr, + candidate_topk=simota_candidate_topk, + iou_weight=simota_iou_weight, + cls_weight=simota_cls_weight) + + def loss_by_feat( + self, + cls_scores: Sequence[Union[Tensor, List]], + bbox_preds: Sequence[Union[Tensor, List]], + objectnesses: Sequence[Union[Tensor, List]], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. 
+ objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + + if isinstance(cls_scores[0], Sequence): + with_aux = True + batch_size = cls_scores[0][0].shape[0] + device = cls_scores[0][0].device + + bbox_preds_main, bbox_preds_aux = zip(*bbox_preds) + objectnesses_main, objectnesses_aux = zip(*objectnesses) + cls_scores_main, cls_scores_aux = zip(*cls_scores) + + head_preds = self._merge_predict_results(bbox_preds_main, + objectnesses_main, + cls_scores_main) + head_preds_aux = self._merge_predict_results( + bbox_preds_aux, objectnesses_aux, cls_scores_aux) + else: + with_aux = False + batch_size = cls_scores[0].shape[0] + device = cls_scores[0].device + + head_preds = self._merge_predict_results(bbox_preds, objectnesses, + cls_scores) + + # Convert gt to norm xywh format + # (num_base_priors, num_batch_gt, 7) + # 7 is mean (batch_idx, cls_id, x_norm, y_norm, + # w_norm, h_norm, prior_idx) + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + scaled_factors = [ + torch.tensor(head_pred.shape, device=device)[[3, 2, 3, 2]] + for head_pred in head_preds + ] + + loss_cls, loss_obj, loss_box = self._calc_loss( + head_preds=head_preds, + head_preds_aux=None, + batch_targets_normed=batch_targets_normed, + near_neighbor_thr=self.near_neighbor_thr, + scaled_factors=scaled_factors, + batch_img_metas=batch_img_metas, + device=device) + + if with_aux: + loss_cls_aux, loss_obj_aux, loss_box_aux = self._calc_loss( + head_preds=head_preds, + head_preds_aux=head_preds_aux, + batch_targets_normed=batch_targets_normed, + near_neighbor_thr=self.near_neighbor_thr * 2, + scaled_factors=scaled_factors, + batch_img_metas=batch_img_metas, + device=device) + loss_cls += self.aux_loss_weights * loss_cls_aux + loss_obj += self.aux_loss_weights * loss_obj_aux + loss_box += self.aux_loss_weights * loss_box_aux + + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * batch_size * world_size, + loss_obj=loss_obj * batch_size * world_size, + loss_bbox=loss_box * batch_size * world_size) + + def _calc_loss(self, head_preds, head_preds_aux, batch_targets_normed, + near_neighbor_thr, scaled_factors, batch_img_metas, device): + loss_cls = torch.zeros(1, device=device) + loss_box = torch.zeros(1, device=device) + loss_obj = torch.zeros(1, device=device) + + assigner_results = self.assigner( + head_preds, + batch_targets_normed, + batch_img_metas[0]['batch_input_shape'], + self.priors_base_sizes, + self.grid_offset, + near_neighbor_thr=near_neighbor_thr) + # mlvl is mean multi_level + mlvl_positive_infos = assigner_results['mlvl_positive_infos'] + mlvl_priors = assigner_results['mlvl_priors'] + mlvl_targets_normed = assigner_results['mlvl_targets_normed'] + + if head_preds_aux is not None: + # This is mean calc aux branch loss + head_preds = head_preds_aux + + for i, head_pred in enumerate(head_preds): + batch_inds, proir_idx, grid_x, grid_y = mlvl_positive_infos[i].T + num_pred_positive = 
batch_inds.shape[0] + target_obj = torch.zeros_like(head_pred[..., 0]) + # empty positive sampler + if num_pred_positive == 0: + loss_box += head_pred[..., :4].sum() * 0 + loss_cls += head_pred[..., 5:].sum() * 0 + loss_obj += self.loss_obj( + head_pred[..., 4], target_obj) * self.obj_level_weights[i] + continue + + priors = mlvl_priors[i] + targets_normed = mlvl_targets_normed[i] + + head_pred_positive = head_pred[batch_inds, proir_idx, grid_y, + grid_x] + + # calc bbox loss + grid_xy = torch.stack([grid_x, grid_y], dim=1) + decoded_pred_bbox = self._decode_bbox_to_xywh( + head_pred_positive[:, :4], priors, grid_xy) + target_bbox_scaled = targets_normed[:, 2:6] * scaled_factors[i] + + loss_box_i, iou = self.loss_bbox(decoded_pred_bbox, + target_bbox_scaled) + loss_box += loss_box_i + + # calc obj loss + target_obj[batch_inds, proir_idx, grid_y, + grid_x] = iou.detach().clamp(0).type(target_obj.dtype) + loss_obj += self.loss_obj(head_pred[..., 4], + target_obj) * self.obj_level_weights[i] + + # calc cls loss + if self.num_classes > 1: + pred_cls_scores = targets_normed[:, 1].long() + target_class = torch.full_like( + head_pred_positive[:, 5:], 0., device=device) + target_class[range(num_pred_positive), pred_cls_scores] = 1. + loss_cls += self.loss_cls(head_pred_positive[:, 5:], + target_class) + else: + loss_cls += head_pred_positive[:, 5:].sum() * 0 + return loss_cls, loss_obj, loss_box + + def _merge_predict_results(self, bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + cls_scores: Sequence[Tensor]) -> List[Tensor]: + """Merge predict output from 3 heads. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + + Returns: + List[Tensor]: Merged output. + """ + head_preds = [] + for bbox_pred, objectness, cls_score in zip(bbox_preds, objectnesses, + cls_scores): + b, _, h, w = bbox_pred.shape + bbox_pred = bbox_pred.reshape(b, self.num_base_priors, -1, h, w) + objectness = objectness.reshape(b, self.num_base_priors, -1, h, w) + cls_score = cls_score.reshape(b, self.num_base_priors, -1, h, w) + head_pred = torch.cat([bbox_pred, objectness, cls_score], + dim=2).permute(0, 1, 3, 4, 2).contiguous() + head_preds.append(head_pred) + return head_preds + + def _decode_bbox_to_xywh(self, bbox_pred, priors_base_sizes, + grid_xy) -> Tensor: + bbox_pred = bbox_pred.sigmoid() + pred_xy = bbox_pred[:, :2] * 2 - 0.5 + grid_xy + pred_wh = (bbox_pred[:, 2:] * 2)**2 * priors_base_sizes + decoded_bbox_pred = torch.cat((pred_xy, pred_wh), dim=-1) + return decoded_bbox_pred diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov8_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov8_head.py new file mode 100644 index 0000000000000000000000000000000000000000..292024178ce2c249f63c9ce1168da767d9718fcf --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolov8_head.py @@ -0,0 +1,396 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
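+# Note: the bbox branch below uses DFL-style (integral) regression: each of the
+# four box sides is predicted as a distribution over ``reg_max`` discrete bins,
+# and the decoded distance is the expectation of that distribution (see the
+# ``proj`` buffer and the softmax/matmul in ``forward_single``). A minimal
+# standalone sketch of that decoding step, for illustration only (shapes and
+# values are assumptions, not part of the upstream module):
+#
+#     import torch
+#     reg_max = 16
+#     side_logits = torch.randn(4, reg_max)            # l, t, r, b bins for one location
+#     proj = torch.arange(reg_max, dtype=torch.float)  # bin indices 0 .. reg_max-1
+#     distances = side_logits.softmax(-1) @ proj       # expected distance per side,
+#                                                      # in feature-grid units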
+import math +from typing import List, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.models.utils import multi_apply +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig) +from mmengine.dist import get_dist_info +from mmengine.model import BaseModule +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from ..utils import gt_instances_preprocess, make_divisible +from .yolov5_head import YOLOv5Head + + +@MODELS.register_module() +class YOLOv8HeadModule(BaseModule): + """YOLOv8HeadModule head module used in `YOLOv8`. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (Union[int, Sequence]): Number of channels in the input + feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to [8, 16, 32]. + reg_max (int): Max value of integral set :math: ``{0, ..., reg_max-1}`` + in QFL setting. Defaults to 16. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: Union[int, Sequence], + widen_factor: float = 1.0, + num_base_priors: int = 1, + featmap_strides: Sequence[int] = (8, 16, 32), + reg_max: int = 16, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.featmap_strides = featmap_strides + self.num_levels = len(self.featmap_strides) + self.num_base_priors = num_base_priors + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.in_channels = in_channels + self.reg_max = reg_max + + in_channels = [] + for channel in self.in_channels: + channel = make_divisible(channel, widen_factor) + in_channels.append(channel) + self.in_channels = in_channels + + self._init_layers() + + def init_weights(self, prior_prob=0.01): + """Initialize the weight and bias of PPYOLOE head.""" + super().init_weights() + for reg_pred, cls_pred, stride in zip(self.reg_preds, self.cls_preds, + self.featmap_strides): + reg_pred[-1].bias.data[:] = 1.0 # box + # cls (.01 objects, 80 classes, 640 img) + cls_pred[-1].bias.data[:self.num_classes] = math.log( + 5 / self.num_classes / (640 / stride)**2) + + def _init_layers(self): + """initialize conv layers in YOLOv8 head.""" + # Init decouple head + self.cls_preds = nn.ModuleList() + self.reg_preds = nn.ModuleList() + + reg_out_channels = max( + (16, self.in_channels[0] // 4, self.reg_max * 4)) + cls_out_channels = max(self.in_channels[0], self.num_classes) + + for i in range(self.num_levels): + self.reg_preds.append( + nn.Sequential( + ConvModule( + in_channels=self.in_channels[i], + out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + in_channels=reg_out_channels, + 
out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + in_channels=reg_out_channels, + out_channels=4 * self.reg_max, + kernel_size=1))) + self.cls_preds.append( + nn.Sequential( + ConvModule( + in_channels=self.in_channels[i], + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + in_channels=cls_out_channels, + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + in_channels=cls_out_channels, + out_channels=self.num_classes, + kernel_size=1))) + + proj = torch.arange(self.reg_max, dtype=torch.float) + self.register_buffer('proj', proj, persistent=False) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions + """ + assert len(x) == self.num_levels + return multi_apply(self.forward_single, x, self.cls_preds, + self.reg_preds) + + def forward_single(self, x: torch.Tensor, cls_pred: nn.ModuleList, + reg_pred: nn.ModuleList) -> Tuple: + """Forward feature of a single scale level.""" + b, _, h, w = x.shape + cls_logit = cls_pred(x) + bbox_dist_preds = reg_pred(x) + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2) + + # TODO: The get_flops script cannot handle the situation of + # matmul, and needs to be fixed later + # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds + else: + return cls_logit, bbox_preds + + +@MODELS.register_module() +class YOLOv8Head(YOLOv5Head): + """YOLOv8Head head used in `YOLOv8`. + + Args: + head_module(:obj:`ConfigDict` or dict): Base module used for YOLOv8Head + prior_generator(dict): Points generator feature maps + in 2D points-based detectors. + bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_dfl (:obj:`ConfigDict` or dict): Config of Distribution Focal + Loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0.5, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=0.5), + loss_bbox: ConfigType = dict( + type='IoULoss', + iou_mode='ciou', + bbox_format='xyxy', + reduction='sum', + loss_weight=7.5, + return_iou=False), + loss_dfl=dict( + type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=1.5 / 4), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.loss_dfl = MODELS.build(loss_dfl) + # YOLOv8 doesn't need loss_obj + self.loss_obj = None + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. + """ + if self.train_cfg: + self.assigner = TASK_UTILS.build(self.train_cfg.assigner) + + # Add common attributes to reduce calculation + self.featmap_sizes_train = None + self.num_level_priors = None + self.flatten_priors_train = None + self.stride_tensor = None + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + bbox_dist_preds: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + bbox_dist_preds (Sequence[Tensor]): Box distribution logits for + each scale level with shape (bs, reg_max + 1, H*W, 4). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. 
+ """ + num_imgs = len(batch_img_metas) + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + # (bs, n, 4 * reg_max) + flatten_pred_dists = [ + bbox_pred_org.reshape(num_imgs, -1, self.head_module.reg_max * 4) + for bbox_pred_org in bbox_dist_preds + ] + + flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1) + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[..., 0]) + + assigned_result = self.assigner( + (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype), + flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_bboxes = assigned_result['assigned_bboxes'] + assigned_scores = assigned_result['assigned_scores'] + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'] + + assigned_scores_sum = assigned_scores.sum().clamp(min=1) + + loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores).sum() + loss_cls /= assigned_scores_sum + + # rescale bbox + assigned_bboxes /= self.stride_tensor + flatten_pred_bboxes /= self.stride_tensor + + # select positive samples mask + num_pos = fg_mask_pre_prior.sum() + if num_pos > 0: + # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox + # will not report an error + # iou loss + prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4]) + pred_bboxes_pos = torch.masked_select( + flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4]) + assigned_bboxes_pos = torch.masked_select( + assigned_bboxes, prior_bbox_mask).reshape([-1, 4]) + bbox_weight = torch.masked_select( + assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1) + loss_bbox = self.loss_bbox( + pred_bboxes_pos, assigned_bboxes_pos, + weight=bbox_weight) / assigned_scores_sum + + # dfl loss + pred_dist_pos = flatten_dist_preds[fg_mask_pre_prior] + assigned_ltrb = self.bbox_coder.encode( + self.flatten_priors_train[..., :2] / self.stride_tensor, + assigned_bboxes, + max_dis=self.head_module.reg_max - 1, + eps=0.01) + assigned_ltrb_pos = torch.masked_select( + assigned_ltrb, prior_bbox_mask).reshape([-1, 4]) + loss_dfl = self.loss_dfl( + pred_dist_pos.reshape(-1, self.head_module.reg_max), + assigned_ltrb_pos.reshape(-1), + weight=bbox_weight.expand(-1, 4).reshape(-1), + avg_factor=assigned_scores_sum) + else: + loss_bbox = flatten_pred_bboxes.sum() * 0 + loss_dfl = 
flatten_pred_bboxes.sum() * 0 + _, world_size = get_dist_info() + return dict( + loss_cls=loss_cls * num_imgs * world_size, + loss_bbox=loss_bbox * num_imgs * world_size, + loss_dfl=loss_dfl * num_imgs * world_size) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_head.py new file mode 100644 index 0000000000000000000000000000000000000000..a203298d8536148a7022711eabeee7f04fea8ab4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_head.py @@ -0,0 +1,514 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmdet.models.task_modules.samplers import PseudoSampler +from mmdet.models.utils import multi_apply +from mmdet.structures.bbox import bbox_xyxy_to_cxcywh +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig, reduce_mean) +from mmengine.model import BaseModule, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS +from .yolov5_head import YOLOv5Head + + +@MODELS.register_module() +class YOLOXHeadModule(BaseModule): + """YOLOXHead head module used in `YOLOX. + + ``_ + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (Union[int, Sequence]): Number of channels in the input + feature map. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_base_priors (int): The number of priors (points) at a point + on the feature grid + stacked_convs (int): Number of stacking convs of the head. + Defaults to 2. + featmap_strides (Sequence[int]): Downsample factor of each feature map. + Defaults to [8, 16, 32]. + use_depthwise (bool): Whether to depthwise separable convolution in + blocks. Defaults to False. + dcn_on_last_conv (bool): If true, use dcn in the last layer of + towers. Defaults to False. + conv_bias (bool or str): If specified as `auto`, it will be decided by + the norm_cfg. Bias of conv will be set as True if `norm_cfg` is + None, otherwise False. Defaults to "auto". + conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for + convolution layer. Defaults to None. + norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization + layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__( + self, + num_classes: int, + in_channels: Union[int, Sequence], + widen_factor: float = 1.0, + num_base_priors: int = 1, + feat_channels: int = 256, + stacked_convs: int = 2, + featmap_strides: Sequence[int] = [8, 16, 32], + use_depthwise: bool = False, + dcn_on_last_conv: bool = False, + conv_bias: Union[bool, str] = 'auto', + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + ): + super().__init__(init_cfg=init_cfg) + self.num_classes = num_classes + self.feat_channels = int(feat_channels * widen_factor) + self.stacked_convs = stacked_convs + self.use_depthwise = use_depthwise + self.dcn_on_last_conv = dcn_on_last_conv + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + self.num_base_priors = num_base_priors + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.featmap_strides = featmap_strides + + if isinstance(in_channels, int): + in_channels = int(in_channels * widen_factor) + self.in_channels = in_channels + + self._init_layers() + + def _init_layers(self): + """Initialize heads for all level feature maps.""" + self.multi_level_cls_convs = nn.ModuleList() + self.multi_level_reg_convs = nn.ModuleList() + self.multi_level_conv_cls = nn.ModuleList() + self.multi_level_conv_reg = nn.ModuleList() + self.multi_level_conv_obj = nn.ModuleList() + for _ in self.featmap_strides: + self.multi_level_cls_convs.append(self._build_stacked_convs()) + self.multi_level_reg_convs.append(self._build_stacked_convs()) + conv_cls, conv_reg, conv_obj = self._build_predictor() + self.multi_level_conv_cls.append(conv_cls) + self.multi_level_conv_reg.append(conv_reg) + self.multi_level_conv_obj.append(conv_obj) + + def _build_stacked_convs(self) -> nn.Sequential: + """Initialize conv layers of a single level head.""" + conv = DepthwiseSeparableConvModule \ + if self.use_depthwise else ConvModule + stacked_convs = [] + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + stacked_convs.append( + conv( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=self.conv_bias)) + return nn.Sequential(*stacked_convs) + + def _build_predictor(self) -> Tuple[nn.Module, nn.Module, nn.Module]: + """Initialize predictor layers of a single level head.""" + conv_cls = nn.Conv2d(self.feat_channels, self.num_classes, 1) + conv_reg = nn.Conv2d(self.feat_channels, 4, 1) + conv_obj = nn.Conv2d(self.feat_channels, 1, 1) + return conv_cls, conv_reg, conv_obj + + def init_weights(self): + """Initialize weights of the head.""" + # Use prior in model initialization to improve stability + super().init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_obj in zip(self.multi_level_conv_cls, + self.multi_level_conv_obj): + conv_cls.bias.data.fill_(bias_init) + conv_obj.bias.data.fill_(bias_init) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network. + + Args: + x (Tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + Returns: + Tuple[List]: A tuple of multi-level classification scores, bbox + predictions, and objectnesses. 
+ """ + + return multi_apply(self.forward_single, x, self.multi_level_cls_convs, + self.multi_level_reg_convs, + self.multi_level_conv_cls, + self.multi_level_conv_reg, + self.multi_level_conv_obj) + + def forward_single(self, x: Tensor, cls_convs: nn.Module, + reg_convs: nn.Module, conv_cls: nn.Module, + conv_reg: nn.Module, + conv_obj: nn.Module) -> Tuple[Tensor, Tensor, Tensor]: + """Forward feature of a single scale level.""" + + cls_feat = cls_convs(x) + reg_feat = reg_convs(x) + + cls_score = conv_cls(cls_feat) + bbox_pred = conv_reg(reg_feat) + objectness = conv_obj(reg_feat) + + return cls_score, bbox_pred, objectness + + +@MODELS.register_module() +class YOLOXHead(YOLOv5Head): + """YOLOXHead head used in `YOLOX `_. + + Args: + head_module(ConfigType): Base module used for YOLOXHead + prior_generator: Points generator feature maps in + 2D points-based detectors. + loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. + loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. + loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss. + loss_bbox_aux (:obj:`ConfigDict` or dict): Config of bbox aux loss. + train_cfg (:obj:`ConfigDict` or dict, optional): Training config of + anchor head. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of + anchor head. Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='YOLOXBBoxCoder'), + loss_cls: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox: ConfigType = dict( + type='mmdet.IoULoss', + mode='square', + eps=1e-16, + reduction='sum', + loss_weight=5.0), + loss_obj: ConfigType = dict( + type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + loss_bbox_aux: ConfigType = dict( + type='mmdet.L1Loss', reduction='sum', loss_weight=1.0), + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + self.use_bbox_aux = False + self.loss_bbox_aux = loss_bbox_aux + + super().__init__( + head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_obj=loss_obj, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. + """ + self.loss_bbox_aux: nn.Module = MODELS.build(self.loss_bbox_aux) + if self.train_cfg: + self.assigner = TASK_UTILS.build(self.train_cfg.assigner) + # YOLOX does not support sampling + self.sampler = PseudoSampler() + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + return self.head_module(x) + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + batch_gt_instances: Tensor, + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. 
+ + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. + """ + num_imgs = len(batch_img_metas) + if batch_gt_instances_ignore is None: + batch_gt_instances_ignore = [None] * num_imgs + + batch_gt_instances = self.gt_instances_preprocess( + batch_gt_instances, len(batch_img_metas)) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_objectness = torch.cat(flatten_objectness, dim=1) + flatten_priors = torch.cat(mlvl_priors) + flatten_bboxes = self.bbox_coder.decode(flatten_priors[..., :2], + flatten_bbox_preds, + flatten_priors[..., 2]) + + (pos_masks, cls_targets, obj_targets, bbox_targets, bbox_aux_target, + num_fg_imgs) = multi_apply( + self._get_targets_single, + flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), + flatten_cls_preds.detach(), flatten_bboxes.detach(), + flatten_objectness.detach(), batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore) + + # The experimental results show that 'reduce_mean' can improve + # performance on the COCO dataset. + num_pos = torch.tensor( + sum(num_fg_imgs), + dtype=torch.float, + device=flatten_cls_preds.device) + num_total_samples = max(reduce_mean(num_pos), 1.0) + + pos_masks = torch.cat(pos_masks, 0) + cls_targets = torch.cat(cls_targets, 0) + obj_targets = torch.cat(obj_targets, 0) + bbox_targets = torch.cat(bbox_targets, 0) + if self.use_bbox_aux: + bbox_aux_target = torch.cat(bbox_aux_target, 0) + + loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), + obj_targets) / num_total_samples + if num_pos > 0: + loss_cls = self.loss_cls( + flatten_cls_preds.view(-1, self.num_classes)[pos_masks], + cls_targets) / num_total_samples + loss_bbox = self.loss_bbox( + flatten_bboxes.view(-1, 4)[pos_masks], + bbox_targets) / num_total_samples + else: + # Avoid cls and reg branch not participating in the gradient + # propagation when there is no ground-truth in the images. 
+ # For more details, please refer to + # https://github.com/open-mmlab/mmdetection/issues/7298 + loss_cls = flatten_cls_preds.sum() * 0 + loss_bbox = flatten_bboxes.sum() * 0 + + loss_dict = dict( + loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) + + if self.use_bbox_aux: + if num_pos > 0: + loss_bbox_aux = self.loss_bbox_aux( + flatten_bbox_preds.view(-1, 4)[pos_masks], + bbox_aux_target) / num_total_samples + else: + # Avoid cls and reg branch not participating in the gradient + # propagation when there is no ground-truth in the images. + # For more details, please refer to + # https://github.com/open-mmlab/mmdetection/issues/7298 + loss_bbox_aux = flatten_bbox_preds.sum() * 0 + loss_dict.update(loss_bbox_aux=loss_bbox_aux) + + return loss_dict + + @torch.no_grad() + def _get_targets_single( + self, + priors: Tensor, + cls_preds: Tensor, + decoded_bboxes: Tensor, + objectness: Tensor, + gt_instances: InstanceData, + img_meta: dict, + gt_instances_ignore: Optional[InstanceData] = None) -> tuple: + """Compute classification, regression, and objectness targets for + priors in a single image. + + Args: + priors (Tensor): All priors of one image, a 2D-Tensor with shape + [num_priors, 4] in [cx, xy, stride_w, stride_y] format. + cls_preds (Tensor): Classification predictions of one image, + a 2D-Tensor with shape [num_priors, num_classes] + decoded_bboxes (Tensor): Decoded bboxes predictions of one image, + a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, + br_x, br_y] format. + objectness (Tensor): Objectness predictions of one image, + a 1D-Tensor with shape [num_priors] + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It should includes ``bboxes`` and ``labels`` + attributes. + img_meta (dict): Meta information for current image. + gt_instances_ignore (:obj:`InstanceData`, optional): Instances + to be ignored during training. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + tuple: + foreground_mask (list[Tensor]): Binary mask of foreground + targets. + cls_target (list[Tensor]): Classification targets of an image. + obj_target (list[Tensor]): Objectness targets of an image. + bbox_target (list[Tensor]): BBox targets of an image. + bbox_aux_target (int): BBox aux targets of an image. + num_pos_per_img (int): Number of positive samples in an image. + """ + + num_priors = priors.size(0) + num_gts = len(gt_instances) + # No target + if num_gts == 0: + cls_target = cls_preds.new_zeros((0, self.num_classes)) + bbox_target = cls_preds.new_zeros((0, 4)) + bbox_aux_target = cls_preds.new_zeros((0, 4)) + obj_target = cls_preds.new_zeros((num_priors, 1)) + foreground_mask = cls_preds.new_zeros(num_priors).bool() + return (foreground_mask, cls_target, obj_target, bbox_target, + bbox_aux_target, 0) + + # YOLOX uses center priors with 0.5 offset to assign targets, + # but use center priors without offset to regress bboxes. 
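+        # Roughly: with ``with_stride=True`` each prior is (x, y, stride_w, stride_h),
+        # where (x, y) is the top-left corner of a grid cell. Adding half a stride
+        # shifts the point to the cell centre for assignment only; for example, the
+        # first stride-8 cell is seen by the assigner at (4, 4) while regression
+        # still uses (0, 0) (numbers are illustrative).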
+ offset_priors = torch.cat( + [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) + + scores = cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid() + pred_instances = InstanceData( + bboxes=decoded_bboxes, scores=scores.sqrt_(), priors=offset_priors) + assign_result = self.assigner.assign( + pred_instances=pred_instances, + gt_instances=gt_instances, + gt_instances_ignore=gt_instances_ignore) + + sampling_result = self.sampler.sample(assign_result, pred_instances, + gt_instances) + pos_inds = sampling_result.pos_inds + num_pos_per_img = pos_inds.size(0) + + pos_ious = assign_result.max_overlaps[pos_inds] + # IOU aware classification score + cls_target = F.one_hot(sampling_result.pos_gt_labels, + self.num_classes) * pos_ious.unsqueeze(-1) + obj_target = torch.zeros_like(objectness).unsqueeze(-1) + obj_target[pos_inds] = 1 + bbox_target = sampling_result.pos_gt_bboxes + bbox_aux_target = cls_preds.new_zeros((num_pos_per_img, 4)) + if self.use_bbox_aux: + bbox_aux_target = self._get_bbox_aux_target( + bbox_aux_target, bbox_target, priors[pos_inds]) + foreground_mask = torch.zeros_like(objectness).to(torch.bool) + foreground_mask[pos_inds] = 1 + return (foreground_mask, cls_target, obj_target, bbox_target, + bbox_aux_target, num_pos_per_img) + + def _get_bbox_aux_target(self, + bbox_aux_target: Tensor, + gt_bboxes: Tensor, + priors: Tensor, + eps: float = 1e-8) -> Tensor: + """Convert gt bboxes to center offset and log width height.""" + gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) + bbox_aux_target[:, :2] = (gt_cxcywh[:, :2] - + priors[:, :2]) / priors[:, 2:] + bbox_aux_target[:, + 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) + return bbox_aux_target + + @staticmethod + def gt_instances_preprocess(batch_gt_instances: Tensor, + batch_size: int) -> List[InstanceData]: + """Split batch_gt_instances with batch size. + + Args: + batch_gt_instances (Tensor): Ground truth + a 2D-Tensor for whole batch, shape [all_gt_bboxes, 6] + batch_size (int): Batch size. + + Returns: + List: batch gt instances data, shape [batch_size, InstanceData] + """ + # faster version + batch_instance_list = [] + for i in range(batch_size): + batch_gt_instance_ = InstanceData() + single_batch_instance = \ + batch_gt_instances[batch_gt_instances[:, 0] == i, :] + batch_gt_instance_.bboxes = single_batch_instance[:, 2:] + batch_gt_instance_.labels = single_batch_instance[:, 1] + batch_instance_list.append(batch_gt_instance_) + + return batch_instance_list diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_pose_head.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_pose_head.py new file mode 100644 index 0000000000000000000000000000000000000000..96264e55299676239ce5a4c9b698941d0356bcea --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/dense_heads/yolox_pose_head.py @@ -0,0 +1,409 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
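+# Note: keypoints in this head are regressed as per-prior offsets and decoded back
+# to image coordinates in ``decode_pose`` as roughly ``kpt_xy = offset * stride + grid_xy``.
+# A minimal standalone sketch of that decoding, for illustration only (the 17
+# keypoints are an assumption borrowed from COCO, not something this file fixes):
+#
+#     import torch
+#     num_keypoints = 17
+#     grid_xy = torch.tensor([[8., 16.]])            # one prior location (x, y)
+#     stride = 8.
+#     offsets = torch.randn(1, num_keypoints, 2)     # predicted per-keypoint offsets
+#     keypoints = offsets * stride + grid_xy.unsqueeze(1)  # absolute xy coordinates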
+from collections import defaultdict +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.ops import batched_nms +from mmdet.models.utils import filter_scores_and_topk +from mmdet.utils import ConfigType, OptInstanceList +from mmengine.config import ConfigDict +from mmengine.model import ModuleList, bias_init_with_prob +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS +from ..utils import OutputSaveFunctionWrapper, OutputSaveObjectWrapper +from .yolox_head import YOLOXHead, YOLOXHeadModule + + +@MODELS.register_module() +class YOLOXPoseHeadModule(YOLOXHeadModule): + """YOLOXPoseHeadModule serves as a head module for `YOLOX-Pose`. + + In comparison to `YOLOXHeadModule`, this module introduces branches for + keypoint prediction. + """ + + def __init__(self, num_keypoints: int, *args, **kwargs): + self.num_keypoints = num_keypoints + super().__init__(*args, **kwargs) + + def _init_layers(self): + """Initializes the layers in the head module.""" + super()._init_layers() + + # The pose branch requires additional layers for precise regression + self.stacked_convs *= 2 + + # Create separate layers for each level of feature maps + pose_convs, offsets_preds, vis_preds = [], [], [] + for _ in self.featmap_strides: + pose_convs.append(self._build_stacked_convs()) + offsets_preds.append( + nn.Conv2d(self.feat_channels, self.num_keypoints * 2, 1)) + vis_preds.append( + nn.Conv2d(self.feat_channels, self.num_keypoints, 1)) + + self.multi_level_pose_convs = ModuleList(pose_convs) + self.multi_level_conv_offsets = ModuleList(offsets_preds) + self.multi_level_conv_vis = ModuleList(vis_preds) + + def init_weights(self): + """Initialize weights of the head.""" + super().init_weights() + + # Use prior in model initialization to improve stability + bias_init = bias_init_with_prob(0.01) + for conv_vis in self.multi_level_conv_vis: + conv_vis.bias.data.fill_(bias_init) + + def forward(self, x: Tuple[Tensor]) -> Tuple[List]: + """Forward features from the upstream network.""" + offsets_pred, vis_pred = [], [] + for i in range(len(x)): + pose_feat = self.multi_level_pose_convs[i](x[i]) + offsets_pred.append(self.multi_level_conv_offsets[i](pose_feat)) + vis_pred.append(self.multi_level_conv_vis[i](pose_feat)) + return (*super().forward(x), offsets_pred, vis_pred) + + +@MODELS.register_module() +class YOLOXPoseHead(YOLOXHead): + """YOLOXPoseHead head used in `YOLO-Pose. + + `_. + Args: + loss_pose (ConfigDict, optional): Config of keypoint OKS loss. + """ + + def __init__( + self, + loss_pose: Optional[ConfigType] = None, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.loss_pose = MODELS.build(loss_pose) + self.num_keypoints = self.head_module.num_keypoints + + # set up buffers to save variables generated in methods of + # the class's base class. 
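+        # ``self._log`` collects per-image tensors (predicted keypoints, foreground
+        # masks, bbox targets) produced while the parent class runs its target
+        # assignment, and ``OutputSaveObjectWrapper`` records the sampler's outputs,
+        # so the keypoint losses in ``loss_by_feat`` can reuse the same positive
+        # matches without re-running the assigner.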
+ self._log = defaultdict(list) + self.sampler = OutputSaveObjectWrapper(self.sampler) + + # ensure that the `sigmas` in self.assigner.oks_calculator + # is on the same device as the model + if hasattr(self.assigner, 'oks_calculator'): + self.add_module('assigner_oks_calculator', + self.assigner.oks_calculator) + + def _clear(self): + """Clear variable buffers.""" + self.sampler.clear() + self._log.clear() + + def loss(self, x: Tuple[Tensor], batch_data_samples: Union[list, + dict]) -> dict: + + if isinstance(batch_data_samples, list): + losses = super().loss(x, batch_data_samples) + else: + outs = self(x) + # Fast version + loss_inputs = outs + (batch_data_samples['bboxes_labels'], + batch_data_samples['keypoints'], + batch_data_samples['keypoints_visible'], + batch_data_samples['img_metas']) + losses = self.loss_by_feat(*loss_inputs) + + return losses + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + objectnesses: Sequence[Tensor], + kpt_preds: Sequence[Tensor], + vis_preds: Sequence[Tensor], + batch_gt_instances: Tensor, + batch_gt_keypoints: Tensor, + batch_gt_keypoints_visible: Tensor, + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + In addition to the base class method, keypoint losses are also + calculated in this method. + """ + + self._clear() + batch_gt_instances = self.gt_kps_instances_preprocess( + batch_gt_instances, batch_gt_keypoints, batch_gt_keypoints_visible, + len(batch_img_metas)) + + # collect keypoints coordinates and visibility from model predictions + kpt_preds = torch.cat([ + kpt_pred.flatten(2).permute(0, 2, 1).contiguous() + for kpt_pred in kpt_preds + ], + dim=1) + + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + grid_priors = torch.cat(mlvl_priors) + + flatten_kpts = self.decode_pose(grid_priors[..., :2], kpt_preds, + grid_priors[..., 2]) + + vis_preds = torch.cat([ + vis_pred.flatten(2).permute(0, 2, 1).contiguous() + for vis_pred in vis_preds + ], + dim=1) + + # compute detection losses and collect targets for keypoints + # predictions simultaneously + self._log['pred_keypoints'] = list(flatten_kpts.detach().split( + 1, dim=0)) + self._log['pred_keypoints_vis'] = list(vis_preds.detach().split( + 1, dim=0)) + + losses = super().loss_by_feat(cls_scores, bbox_preds, objectnesses, + batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore) + + kpt_targets, vis_targets = [], [] + sampling_results = self.sampler.log['sample'] + sampling_result_idx = 0 + for gt_instances in batch_gt_instances: + if len(gt_instances) > 0: + sampling_result = sampling_results[sampling_result_idx] + kpt_target = gt_instances['keypoints'][ + sampling_result.pos_assigned_gt_inds] + vis_target = gt_instances['keypoints_visible'][ + sampling_result.pos_assigned_gt_inds] + sampling_result_idx += 1 + kpt_targets.append(kpt_target) + vis_targets.append(vis_target) + + if len(kpt_targets) > 0: + kpt_targets = torch.cat(kpt_targets, 0) + vis_targets = torch.cat(vis_targets, 0) + + # compute keypoint losses + if len(kpt_targets) > 0: + vis_targets = (vis_targets > 0).float() + pos_masks = torch.cat(self._log['foreground_mask'], 0) + bbox_targets = torch.cat(self._log['bbox_target'], 0) + loss_kpt = self.loss_pose( + flatten_kpts.view(-1, 
self.num_keypoints, 2)[pos_masks], + kpt_targets, vis_targets, bbox_targets) + loss_vis = self.loss_cls( + vis_preds.view(-1, self.num_keypoints)[pos_masks], + vis_targets) / vis_targets.sum() + else: + loss_kpt = kpt_preds.sum() * 0 + loss_vis = vis_preds.sum() * 0 + + losses.update(dict(loss_kpt=loss_kpt, loss_vis=loss_vis)) + + self._clear() + return losses + + @torch.no_grad() + def _get_targets_single( + self, + priors: Tensor, + cls_preds: Tensor, + decoded_bboxes: Tensor, + objectness: Tensor, + gt_instances: InstanceData, + img_meta: dict, + gt_instances_ignore: Optional[InstanceData] = None) -> tuple: + """Calculates targets for a single image, and saves them to the log. + + This method is similar to the _get_targets_single method in the base + class, but additionally saves the foreground mask and bbox targets to + the log. + """ + + # Construct a combined representation of bboxes and keypoints to + # ensure keypoints are also involved in the positive sample + # assignment process + kpt = self._log['pred_keypoints'].pop(0).squeeze(0) + kpt_vis = self._log['pred_keypoints_vis'].pop(0).squeeze(0) + kpt = torch.cat((kpt, kpt_vis.unsqueeze(-1)), dim=-1) + decoded_bboxes = torch.cat((decoded_bboxes, kpt.flatten(1)), dim=1) + + targets = super()._get_targets_single(priors, cls_preds, + decoded_bboxes, objectness, + gt_instances, img_meta, + gt_instances_ignore) + self._log['foreground_mask'].append(targets[0]) + self._log['bbox_target'].append(targets[3]) + return targets + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + kpt_preds: Optional[List[Tensor]] = None, + vis_preds: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted by the head into bbox + and keypoint results. + + In addition to the base class method, keypoint predictions are also + calculated in this method. + """ + """calculate predicted bboxes and get the kept instances indices. 
+ + use OutputSaveFunctionWrapper as context manager to obtain + intermediate output from a parent class without copying a + arge block of code + """ + with OutputSaveFunctionWrapper( + filter_scores_and_topk, + super().predict_by_feat.__globals__) as outputs_1: + with OutputSaveFunctionWrapper( + batched_nms, + super()._bbox_post_process.__globals__) as outputs_2: + results_list = super().predict_by_feat(cls_scores, bbox_preds, + objectnesses, + batch_img_metas, cfg, + rescale, with_nms) + keep_indices_topk = [ + out[2][:cfg.max_per_img] for out in outputs_1 + ] + keep_indices_nms = [ + out[1][:cfg.max_per_img] for out in outputs_2 + ] + + num_imgs = len(batch_img_metas) + + # recover keypoints coordinates from model predictions + featmap_sizes = [vis_pred.shape[2:] for vis_pred in vis_preds] + priors = torch.cat(self.mlvl_priors) + strides = [ + priors.new_full((featmap_size.numel() * self.num_base_priors, ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + strides = torch.cat(strides) + kpt_preds = torch.cat([ + kpt_pred.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.num_keypoints * 2) for kpt_pred in kpt_preds + ], + dim=1) + flatten_decoded_kpts = self.decode_pose(priors, kpt_preds, strides) + + vis_preds = torch.cat([ + vis_pred.permute(0, 2, 3, 1).reshape( + num_imgs, -1, self.num_keypoints) for vis_pred in vis_preds + ], + dim=1).sigmoid() + + # select keypoints predictions according to bbox scores and nms result + keep_indices_nms_idx = 0 + for pred_instances, kpts, kpts_vis, img_meta, keep_idxs \ + in zip( + results_list, flatten_decoded_kpts, vis_preds, + batch_img_metas, keep_indices_topk): + + pred_instances.bbox_scores = pred_instances.scores + + if len(pred_instances) == 0: + pred_instances.keypoints = kpts[:0] + pred_instances.keypoint_scores = kpts_vis[:0] + continue + + kpts = kpts[keep_idxs] + kpts_vis = kpts_vis[keep_idxs] + + if rescale: + pad_param = img_meta.get('img_meta', None) + scale_factor = img_meta['scale_factor'] + if pad_param is not None: + kpts -= kpts.new_tensor([pad_param[2], pad_param[0]]) + kpts /= kpts.new_tensor(scale_factor).repeat( + (1, self.num_keypoints, 1)) + + keep_idxs_nms = keep_indices_nms[keep_indices_nms_idx] + kpts = kpts[keep_idxs_nms] + kpts_vis = kpts_vis[keep_idxs_nms] + keep_indices_nms_idx += 1 + + pred_instances.keypoints = kpts + pred_instances.keypoint_scores = kpts_vis + + results_list = [r.numpy() for r in results_list] + return results_list + + def decode_pose(self, grids: torch.Tensor, offsets: torch.Tensor, + strides: Union[torch.Tensor, int]) -> torch.Tensor: + """Decode regression offsets to keypoints. + + Args: + grids (torch.Tensor): The coordinates of the feature map grids. + offsets (torch.Tensor): The predicted offset of each keypoint + relative to its corresponding grid. + strides (torch.Tensor | int): The stride of the feature map for + each instance. + Returns: + torch.Tensor: The decoded keypoints coordinates. + """ + + if isinstance(strides, int): + strides = torch.tensor([strides]).to(offsets) + + strides = strides.reshape(1, -1, 1, 1) + offsets = offsets.reshape(*offsets.shape[:2], -1, 2) + xy_coordinates = (offsets[..., :2] * strides) + grids.unsqueeze(1) + return xy_coordinates + + @staticmethod + def gt_kps_instances_preprocess(batch_gt_instances: Tensor, + batch_gt_keypoints, + batch_gt_keypoints_visible, + batch_size: int) -> List[InstanceData]: + """Split batch_gt_instances with batch size. 
+ + Args: + batch_gt_instances (Tensor): Ground truth + a 2D-Tensor for whole batch, shape [all_gt_bboxes, 6] + batch_size (int): Batch size. + + Returns: + List: batch gt instances data, shape [batch_size, InstanceData] + """ + # faster version + batch_instance_list = [] + for i in range(batch_size): + batch_gt_instance_ = InstanceData() + single_batch_instance = \ + batch_gt_instances[batch_gt_instances[:, 0] == i, :] + keypoints = \ + batch_gt_keypoints[batch_gt_instances[:, 0] == i, :] + keypoints_visible = \ + batch_gt_keypoints_visible[batch_gt_instances[:, 0] == i, :] + batch_gt_instance_.bboxes = single_batch_instance[:, 2:] + batch_gt_instance_.labels = single_batch_instance[:, 1] + batch_gt_instance_.keypoints = keypoints + batch_gt_instance_.keypoints_visible = keypoints_visible + batch_instance_list.append(batch_gt_instance_) + + return batch_instance_list + + @staticmethod + def gt_instances_preprocess(batch_gt_instances: List[InstanceData], *args, + **kwargs) -> List[InstanceData]: + return batch_gt_instances diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74fb1c6c21c5840a5cd3f45a1a9f827c0e670604 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .yolo_detector import YOLODetector + +__all__ = ['YOLODetector'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/yolo_detector.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/yolo_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..e6783fbab41287df54f136ea121e827d0603414f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/detectors/yolo_detector.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdet.models.detectors.single_stage import SingleStageDetector +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig +from mmengine.dist import get_world_size +from mmengine.logging import print_log + +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class YOLODetector(SingleStageDetector): + r"""Implementation of YOLO Series + + Args: + backbone (:obj:`ConfigDict` or dict): The backbone config. + neck (:obj:`ConfigDict` or dict): The neck config. + bbox_head (:obj:`ConfigDict` or dict): The bbox head config. + train_cfg (:obj:`ConfigDict` or dict, optional): The training config + of YOLO. Defaults to None. + test_cfg (:obj:`ConfigDict` or dict, optional): The testing config + of YOLO. Defaults to None. + data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of + :class:`DetDataPreprocessor` to process the input data. + Defaults to None. + init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or + list[dict], optional): Initialization config dict. + Defaults to None. + use_syncbn (bool): whether to use SyncBatchNorm. Defaults to True. 
+ """ + + def __init__(self, + backbone: ConfigType, + neck: ConfigType, + bbox_head: ConfigType, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + data_preprocessor: OptConfigType = None, + init_cfg: OptMultiConfig = None, + use_syncbn: bool = True): + super().__init__( + backbone=backbone, + neck=neck, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # TODO: Waiting for mmengine support + if use_syncbn and get_world_size() > 1: + torch.nn.SyncBatchNorm.convert_sync_batchnorm(self) + print_log('Using SyncBatchNorm()', 'current') diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02753057f2ddf51b0688f4f65ebc52e12be9fa7a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ema import ExpMomentumEMA +from .yolo_bricks import (BepC3StageBlock, BiFusion, CSPLayerWithTwoConv, + DarknetBottleneck, EELANBlock, EffectiveSELayer, + ELANBlock, ImplicitA, ImplicitM, + MaxPoolAndStrideConvBlock, PPYOLOEBasicBlock, + RepStageBlock, RepVGGBlock, SPPFBottleneck, + SPPFCSPBlock, TinyDownSampleBlock) + +__all__ = [ + 'SPPFBottleneck', 'RepVGGBlock', 'RepStageBlock', 'ExpMomentumEMA', + 'ELANBlock', 'MaxPoolAndStrideConvBlock', 'SPPFCSPBlock', + 'PPYOLOEBasicBlock', 'EffectiveSELayer', 'TinyDownSampleBlock', + 'EELANBlock', 'ImplicitA', 'ImplicitM', 'BepC3StageBlock', + 'CSPLayerWithTwoConv', 'DarknetBottleneck', 'BiFusion' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/ema.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/ema.py new file mode 100644 index 0000000000000000000000000000000000000000..02ed204190ee4a5ab9395eddce5866545caac2c0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/ema.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Optional + +import torch +import torch.nn as nn +from mmdet.models.layers import ExpMomentumEMA as MMDET_ExpMomentumEMA +from torch import Tensor + +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class ExpMomentumEMA(MMDET_ExpMomentumEMA): + """Exponential moving average (EMA) with exponential momentum strategy, + which is used in YOLO. + + Args: + model (nn.Module): The model to be averaged. + momentum (float): The momentum used for updating ema parameter. + Ema's parameters are updated with the formula: + `averaged_param = (1-momentum) * averaged_param + momentum * + source_param`. Defaults to 0.0002. + gamma (int): Use a larger momentum early in training and gradually + annealing to a smaller value to update the ema model smoothly. The + momentum is calculated as + `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`. + Defaults to 2000. + interval (int): Interval between two updates. Defaults to 1. + device (torch.device, optional): If provided, the averaged model will + be stored on the :attr:`device`. Defaults to None. + update_buffers (bool): if True, it will compute running averages for + both the parameters and the buffers of the model. Defaults to + False. 
+ """ + + def __init__(self, + model: nn.Module, + momentum: float = 0.0002, + gamma: int = 2000, + interval=1, + device: Optional[torch.device] = None, + update_buffers: bool = False): + super().__init__( + model=model, + momentum=momentum, + interval=interval, + device=device, + update_buffers=update_buffers) + assert gamma > 0, f'gamma must be greater than 0, but got {gamma}' + self.gamma = gamma + + # Note: There is no need to re-fetch every update, + # as most models do not change their structure + # during the training process. + self.src_parameters = ( + model.state_dict() + if self.update_buffers else dict(model.named_parameters())) + if not self.update_buffers: + self.src_buffers = model.buffers() + + def avg_func(self, averaged_param: Tensor, source_param: Tensor, + steps: int): + """Compute the moving average of the parameters using the exponential + momentum strategy. + + Args: + averaged_param (Tensor): The averaged parameters. + source_param (Tensor): The source parameters. + steps (int): The number of times the parameters have been + updated. + """ + momentum = (1 - self.momentum) * math.exp( + -float(1 + steps) / self.gamma) + self.momentum + averaged_param.lerp_(source_param, momentum) + + def update_parameters(self, model: nn.Module): + """Update the parameters after each training step. + + Args: + model (nn.Module): The model of the parameter needs to be updated. + """ + if self.steps == 0: + for k, p_avg in self.avg_parameters.items(): + p_avg.data.copy_(self.src_parameters[k].data) + elif self.steps % self.interval == 0: + for k, p_avg in self.avg_parameters.items(): + if p_avg.dtype.is_floating_point: + self.avg_func(p_avg.data, self.src_parameters[k].data, + self.steps) + if not self.update_buffers: + # If not update the buffers, + # keep the buffers in sync with the source model. + for b_avg, b_src in zip(self.module.buffers(), self.src_buffers): + b_avg.data.copy_(b_src.data) + self.steps += 1 diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/yolo_bricks.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/yolo_bricks.py new file mode 100644 index 0000000000000000000000000000000000000000..19175be1a0e88f5bb7fb87b6810c52050293d890 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/layers/yolo_bricks.py @@ -0,0 +1,1728 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, MaxPool2d, + build_norm_layer) +from mmdet.models.layers.csp_layer import \ + DarknetBottleneck as MMDET_DarknetBottleneck +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig +from mmengine.model import BaseModule +from mmengine.utils import digit_version +from torch import Tensor + +from mmyolo.registry import MODELS + +if digit_version(torch.__version__) >= digit_version('1.7.0'): + MODELS.register_module(module=nn.SiLU, name='SiLU') +else: + + class SiLU(nn.Module): + """Sigmoid Weighted Liner Unit.""" + + def __init__(self, inplace=True): + super().__init__() + + def forward(self, inputs) -> Tensor: + return inputs * torch.sigmoid(inputs) + + MODELS.register_module(module=SiLU, name='SiLU') + + +class SPPFBottleneck(BaseModule): + """Spatial pyramid pooling - Fast (SPPF) layer for + YOLOv5, YOLOX and PPYOLOE by Glenn Jocher + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. 
+ kernel_sizes (int, tuple[int]): Sequential or number of kernel + sizes of pooling layers. Defaults to 5. + use_conv_first (bool): Whether to use conv before pooling layer. + In YOLOv5 and YOLOX, the para set to True. + In PPYOLOE, the para set to False. + Defaults to True. + mid_channels_scale (float): Channel multiplier, multiply in_channels + by this amount to get mid_channels. This parameter is valid only + when use_conv_fist=True.Defaults to 0.5. + conv_cfg (dict): Config dict for convolution layer. Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_sizes: Union[int, Sequence[int]] = 5, + use_conv_first: bool = True, + mid_channels_scale: float = 0.5, + conv_cfg: ConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + + if use_conv_first: + mid_channels = int(in_channels * mid_channels_scale) + self.conv1 = ConvModule( + in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + mid_channels = in_channels + self.conv1 = None + self.kernel_sizes = kernel_sizes + if isinstance(kernel_sizes, int): + self.poolings = nn.MaxPool2d( + kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2) + conv2_in_channels = mid_channels * 4 + else: + self.poolings = nn.ModuleList([ + nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) + for ks in kernel_sizes + ]) + conv2_in_channels = mid_channels * (len(kernel_sizes) + 1) + + self.conv2 = ConvModule( + conv2_in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + if self.conv1: + x = self.conv1(x) + if isinstance(self.kernel_sizes, int): + y1 = self.poolings(x) + y2 = self.poolings(y1) + x = torch.cat([x, y1, y2, self.poolings(y2)], dim=1) + else: + x = torch.cat( + [x] + [pooling(x) for pooling in self.poolings], dim=1) + x = self.conv2(x) + return x + + +@MODELS.register_module() +class RepVGGBlock(nn.Module): + """RepVGGBlock is a basic rep-style block, including training and deploy + status This code is based on + https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py. + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the convolving kernel + stride (int or tuple): Stride of the convolution. Default: 1 + padding (int, tuple): Padding added to all four sides of + the input. Default: 1 + dilation (int or tuple): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input + channels to output channels. Default: 1 + padding_mode (string, optional): Default: 'zeros' + use_se (bool): Whether to use se. Default: False + use_alpha (bool): Whether to use `alpha` parameter at 1x1 conv. + In PPYOLOE+ model backbone, `use_alpha` will be set to True. + Default: False. + use_bn_first (bool): Whether to use bn layer before conv. 
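A quick standalone check (illustrative only, plain PyTorch) of why the integer `kernel_sizes` branch of `SPPFBottleneck` above reuses a single 5×5 pooling layer: chaining 5×5 max-pools with stride 1 and padding 2 reproduces the parallel 5/9/13 pooling of classic SPP.
```
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
pool5 = nn.MaxPool2d(5, stride=1, padding=2)

# Chained 5x5 pooling (SPPF style).
y1 = pool5(x)
y2 = pool5(y1)
y3 = pool5(y2)

# Parallel 9x9 and 13x13 pooling (classic SPP style).
z9 = nn.MaxPool2d(9, stride=1, padding=4)(x)
z13 = nn.MaxPool2d(13, stride=1, padding=6)(x)

print(torch.equal(y2, z9), torch.equal(y3, z13))  # True True
```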
+ In YOLOv6 and YOLOv7, this will be set to True. + In PPYOLOE, this will be set to False. + Default: True. + deploy (bool): Whether in deploy mode. Default: False + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int]] = 3, + stride: Union[int, Tuple[int]] = 1, + padding: Union[int, Tuple[int]] = 1, + dilation: Union[int, Tuple[int]] = 1, + groups: Optional[int] = 1, + padding_mode: Optional[str] = 'zeros', + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + use_se: bool = False, + use_alpha: bool = False, + use_bn_first=True, + deploy: bool = False): + super().__init__() + self.deploy = deploy + self.groups = groups + self.in_channels = in_channels + self.out_channels = out_channels + + assert kernel_size == 3 + assert padding == 1 + + padding_11 = padding - kernel_size // 2 + + self.nonlinearity = MODELS.build(act_cfg) + + if use_se: + raise NotImplementedError('se block not supported yet') + else: + self.se = nn.Identity() + + if use_alpha: + alpha = torch.ones([ + 1, + ], dtype=torch.float32, requires_grad=True) + self.alpha = nn.Parameter(alpha, requires_grad=True) + else: + self.alpha = None + + if deploy: + self.rbr_reparam = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=True, + padding_mode=padding_mode) + + else: + if use_bn_first and (out_channels == in_channels) and stride == 1: + self.rbr_identity = build_norm_layer( + norm_cfg, num_features=in_channels)[1] + else: + self.rbr_identity = None + + self.rbr_dense = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None) + self.rbr_1x1 = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, + padding=padding_11, + groups=groups, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, inputs: Tensor) -> Tensor: + """Forward process. + Args: + inputs (Tensor): The input tensor. + + Returns: + Tensor: The output tensor. + """ + if hasattr(self, 'rbr_reparam'): + return self.nonlinearity(self.se(self.rbr_reparam(inputs))) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + if self.alpha: + return self.nonlinearity( + self.se( + self.rbr_dense(inputs) + + self.alpha * self.rbr_1x1(inputs) + id_out)) + else: + return self.nonlinearity( + self.se( + self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)) + + def get_equivalent_kernel_bias(self): + """Derives the equivalent kernel and bias in a differentiable way. + + Returns: + tuple: Equivalent kernel and bias + """ + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + if self.alpha: + return kernel3x3 + self.alpha * self._pad_1x1_to_3x3_tensor( + kernel1x1) + kernelid, bias3x3 + self.alpha * bias1x1 + biasid + else: + return kernel3x3 + self._pad_1x1_to_3x3_tensor( + kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + """Pad 1x1 tensor to 3x3. + Args: + kernel1x1 (Tensor): The input 1x1 kernel need to be padded. + + Returns: + Tensor: 3x3 kernel after padded. 
+ """ + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch: nn.Module) -> Tuple[np.ndarray, Tensor]: + """Derives the equivalent kernel and bias of a specific branch layer. + + Args: + branch (nn.Module): The layer that needs to be equivalently + transformed, which can be nn.Sequential or nn.Batchnorm2d + + Returns: + tuple: Equivalent kernel and bias + """ + if branch is None: + return 0, 0 + if isinstance(branch, ConvModule): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, (nn.SyncBatchNorm, nn.BatchNorm2d)) + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), + dtype=np.float32) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to( + branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def switch_to_deploy(self): + """Switch to deploy mode.""" + if hasattr(self, 'rbr_reparam'): + return + kernel, bias = self.get_equivalent_kernel_bias() + self.rbr_reparam = nn.Conv2d( + in_channels=self.rbr_dense.conv.in_channels, + out_channels=self.rbr_dense.conv.out_channels, + kernel_size=self.rbr_dense.conv.kernel_size, + stride=self.rbr_dense.conv.stride, + padding=self.rbr_dense.conv.padding, + dilation=self.rbr_dense.conv.dilation, + groups=self.rbr_dense.conv.groups, + bias=True) + self.rbr_reparam.weight.data = kernel + self.rbr_reparam.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('rbr_dense') + self.__delattr__('rbr_1x1') + if hasattr(self, 'rbr_identity'): + self.__delattr__('rbr_identity') + if hasattr(self, 'id_tensor'): + self.__delattr__('id_tensor') + self.deploy = True + + +@MODELS.register_module() +class BepC3StageBlock(nn.Module): + """Beer-mug RepC3 Block. + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + num_blocks (int): Number of blocks. Defaults to 1 + hidden_ratio (float): Hidden channel expansion. + Default: 0.5 + concat_all_layer (bool): Concat all layer when forward calculate. + Default: True + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + norm_cfg (ConfigType): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (ConfigType): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). 
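The intent of `RepVGGBlock.switch_to_deploy` above can be sanity-checked with a short sketch: after fusing, the single 3×3 convolution should reproduce the multi-branch output in eval mode. This assumes the vendored `mmyolo` package (and its mmcv/mmdet dependencies) is importable; the channel and spatial sizes are arbitrary.
```
import torch
from mmyolo.models.layers import RepVGGBlock  # requires the vendored mmyolo install

block = RepVGGBlock(64, 64, kernel_size=3, stride=1, padding=1).eval()
x = torch.randn(2, 64, 16, 16)

with torch.no_grad():
    y_multi_branch = block(x)     # 3x3 conv + 1x1 conv + identity BN branches
    block.switch_to_deploy()      # fuse all branches into one 3x3 conv
    y_deploy = block(x)

print(torch.allclose(y_multi_branch, y_deploy, atol=1e-5))  # expected: True
```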
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + num_blocks: int = 1, + hidden_ratio: float = 0.5, + concat_all_layer: bool = True, + block_cfg: ConfigType = dict(type='RepVGGBlock'), + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True)): + super().__init__() + hidden_channels = int(out_channels * hidden_ratio) + + self.conv1 = ConvModule( + in_channels, + hidden_channels, + kernel_size=1, + stride=1, + groups=1, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + in_channels, + hidden_channels, + kernel_size=1, + stride=1, + groups=1, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv3 = ConvModule( + 2 * hidden_channels, + out_channels, + kernel_size=1, + stride=1, + groups=1, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.block = RepStageBlock( + in_channels=hidden_channels, + out_channels=hidden_channels, + num_blocks=num_blocks, + block_cfg=block_cfg, + bottle_block=BottleRep) + self.concat_all_layer = concat_all_layer + if not concat_all_layer: + self.conv3 = ConvModule( + hidden_channels, + out_channels, + kernel_size=1, + stride=1, + groups=1, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + if self.concat_all_layer is True: + return self.conv3( + torch.cat((self.block(self.conv1(x)), self.conv2(x)), dim=1)) + else: + return self.conv3(self.block(self.conv1(x))) + + +class BottleRep(nn.Module): + """Bottle Rep Block. + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + adaptive_weight (bool): Add adaptive_weight when forward calculate. + Defaults False. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + block_cfg: ConfigType = dict(type='RepVGGBlock'), + adaptive_weight: bool = False): + super().__init__() + conv1_cfg = block_cfg.copy() + conv2_cfg = block_cfg.copy() + + conv1_cfg.update( + dict(in_channels=in_channels, out_channels=out_channels)) + conv2_cfg.update( + dict(in_channels=out_channels, out_channels=out_channels)) + + self.conv1 = MODELS.build(conv1_cfg) + self.conv2 = MODELS.build(conv2_cfg) + + if in_channels != out_channels: + self.shortcut = False + else: + self.shortcut = True + if adaptive_weight: + self.alpha = nn.Parameter(torch.ones(1)) + else: + self.alpha = 1.0 + + def forward(self, x: Tensor) -> Tensor: + outputs = self.conv1(x) + outputs = self.conv2(outputs) + return outputs + self.alpha * x if self.shortcut else outputs + + +@MODELS.register_module() +class ConvWrapper(nn.Module): + """Wrapper for normal Conv with SiLU activation. + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the convolving kernel + stride (int or tuple): Stride of the convolution. Default: 1 + groups (int, optional): Number of blocked connections from input + channels to output channels. Default: 1 + bias (bool, optional): Conv bias. Default: True. + norm_cfg (ConfigType): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (ConfigType): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + stride: int = 1, + groups: int = 1, + bias: bool = True, + norm_cfg: ConfigType = None, + act_cfg: ConfigType = dict(type='SiLU')): + super().__init__() + self.block = ConvModule( + in_channels, + out_channels, + kernel_size, + stride, + padding=kernel_size // 2, + groups=groups, + bias=bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: Tensor) -> Tensor: + return self.block(x) + + +@MODELS.register_module() +class EffectiveSELayer(nn.Module): + """Effective Squeeze-Excitation. + + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` + arxiv (https://arxiv.org/abs/1911.06667) + This code referenced to + https://github.com/youngwanLEE/CenterMask/blob/72147e8aae673fcaf4103ee90a6a6b73863e7fa1/maskrcnn_benchmark/modeling/backbone/vovnet.py#L108-L121 # noqa + + Args: + channels (int): The input and output channels of this Module. + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='HSigmoid'). + """ + + def __init__(self, + channels: int, + act_cfg: ConfigType = dict(type='HSigmoid')): + super().__init__() + assert isinstance(act_cfg, dict) + self.fc = ConvModule(channels, channels, 1, act_cfg=None) + + act_cfg_ = act_cfg.copy() # type: ignore + self.activate = MODELS.build(act_cfg_) + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + x_se = x.mean((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.activate(x_se) + + +class PPYOLOESELayer(nn.Module): + """Squeeze-and-Excitation Attention Module for PPYOLOE. + There are some differences between the current implementation and + SELayer in mmdet: + 1. For fast speed and avoiding double inference in ppyoloe, + use `F.adaptive_avg_pool2d` before PPYOLOESELayer. + 2. Special ways to init weights. + 3. Different convolution order. + + Args: + feat_channels (int): The input (and output) channels of the SE layer. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.1, eps=1e-5). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + """ + + def __init__(self, + feat_channels: int, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True)): + super().__init__() + self.fc = nn.Conv2d(feat_channels, feat_channels, 1) + self.sig = nn.Sigmoid() + self.conv = ConvModule( + feat_channels, + feat_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self._init_weights() + + def _init_weights(self): + """Init weights.""" + nn.init.normal_(self.fc.weight, mean=0, std=0.001) + + def forward(self, feat: Tensor, avg_feat: Tensor) -> Tensor: + """Forward process + Args: + feat (Tensor): The input tensor. + avg_feat (Tensor): Average pooling feature tensor. + """ + weight = self.sig(self.fc(avg_feat)) + return self.conv(feat * weight) + + +@MODELS.register_module() +class ELANBlock(BaseModule): + """Efficient layer aggregation networks for YOLOv7. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The out channels of this Module. + middle_ratio (float): The scaling ratio of the middle layer + based on the in_channels. + block_ratio (float): The scaling ratio of the block layer + based on the in_channels. + num_blocks (int): The number of blocks in the main branch. + Defaults to 2. + num_convs_in_block (int): The number of convs pre block. 
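The gating performed by `EffectiveSELayer` above boils down to a global average pool, a 1×1 convolution, and a hard-sigmoid rescaling. The plain-PyTorch sketch below is illustrative only; `F.hardsigmoid` stands in for mmcv's `HSigmoid` activation and the channel count is arbitrary.
```
import torch
import torch.nn as nn
import torch.nn.functional as F

channels = 32
fc = nn.Conv2d(channels, channels, kernel_size=1)

x = torch.randn(2, channels, 20, 20)
x_se = x.mean((2, 3), keepdim=True)   # global average pooling per channel
gate = F.hardsigmoid(fc(x_se))        # channel attention weights in [0, 1]
out = x * gate                        # rescale the input feature map
print(out.shape)                      # torch.Size([2, 32, 20, 20])
```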
+ Defaults to 1. + conv_cfg (dict): Config dict for convolution layer. Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + middle_ratio: float, + block_ratio: float, + num_blocks: int = 2, + num_convs_in_block: int = 1, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + assert num_blocks >= 1 + assert num_convs_in_block >= 1 + + middle_channels = int(in_channels * middle_ratio) + block_channels = int(in_channels * block_ratio) + final_conv_in_channels = int( + num_blocks * block_channels) + 2 * middle_channels + + self.main_conv = ConvModule( + in_channels, + middle_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.short_conv = ConvModule( + in_channels, + middle_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.blocks = nn.ModuleList() + for _ in range(num_blocks): + if num_convs_in_block == 1: + internal_block = ConvModule( + middle_channels, + block_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + internal_block = [] + for _ in range(num_convs_in_block): + internal_block.append( + ConvModule( + middle_channels, + block_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + middle_channels = block_channels + internal_block = nn.Sequential(*internal_block) + + middle_channels = block_channels + self.blocks.append(internal_block) + + self.final_conv = ConvModule( + final_conv_in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + x_short = self.short_conv(x) + x_main = self.main_conv(x) + block_outs = [] + x_block = x_main + for block in self.blocks: + x_block = block(x_block) + block_outs.append(x_block) + x_final = torch.cat((*block_outs[::-1], x_main, x_short), dim=1) + return self.final_conv(x_final) + + +@MODELS.register_module() +class EELANBlock(BaseModule): + """Expand efficient layer aggregation networks for YOLOv7. + + Args: + num_elan_block (int): The number of ELANBlock. + """ + + def __init__(self, num_elan_block: int, **kwargs): + super().__init__() + assert num_elan_block >= 1 + self.e_elan_blocks = nn.ModuleList() + for _ in range(num_elan_block): + self.e_elan_blocks.append(ELANBlock(**kwargs)) + + def forward(self, x: Tensor) -> Tensor: + outs = [] + for elan_blocks in self.e_elan_blocks: + outs.append(elan_blocks(x)) + return sum(outs) + + +class MaxPoolAndStrideConvBlock(BaseModule): + """Max pooling and stride conv layer for YOLOv7. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The out channels of this Module. + maxpool_kernel_sizes (int): kernel sizes of pooling layers. + Defaults to 2. + use_in_channels_of_middle (bool): Whether to calculate middle channels + based on in_channels. Defaults to False. + conv_cfg (dict): Config dict for convolution layer. 
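To make the channel bookkeeping of `ELANBlock` above concrete, a small usage sketch (assuming the vendored `mmyolo` package is importable; all sizes are hypothetical): with `middle_ratio=0.5` and `block_ratio=0.25` on 64 input channels, the final 1×1 convolution sees `2 * 32 + num_blocks * 16` channels.
```
import torch
from mmyolo.models.layers import ELANBlock

elan = ELANBlock(64, 128, middle_ratio=0.5, block_ratio=0.25,
                 num_blocks=2, num_convs_in_block=2)
x = torch.randn(1, 64, 32, 32)
print(elan(x).shape)  # torch.Size([1, 128, 32, 32])
```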
Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + maxpool_kernel_sizes: int = 2, + use_in_channels_of_middle: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + + middle_channels = in_channels if use_in_channels_of_middle \ + else out_channels // 2 + + self.maxpool_branches = nn.Sequential( + MaxPool2d( + kernel_size=maxpool_kernel_sizes, stride=maxpool_kernel_sizes), + ConvModule( + in_channels, + out_channels // 2, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.stride_conv_branches = nn.Sequential( + ConvModule( + in_channels, + middle_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + middle_channels, + out_channels // 2, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + maxpool_out = self.maxpool_branches(x) + stride_conv_out = self.stride_conv_branches(x) + return torch.cat([stride_conv_out, maxpool_out], dim=1) + + +@MODELS.register_module() +class TinyDownSampleBlock(BaseModule): + """Down sample layer for YOLOv7-tiny. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The out channels of this Module. + middle_ratio (float): The scaling ratio of the middle layer + based on the in_channels. Defaults to 1.0. + kernel_sizes (int, tuple[int]): Sequential or number of kernel + sizes of pooling layers. Defaults to 3. + conv_cfg (dict): Config dict for convolution layer. Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='LeakyReLU', negative_slope=0.1). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + middle_ratio: float = 1.0, + kernel_sizes: Union[int, Sequence[int]] = 3, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='LeakyReLU', negative_slope=0.1), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + + middle_channels = int(in_channels * middle_ratio) + + self.short_conv = ConvModule( + in_channels, + middle_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.main_convs = nn.ModuleList() + for i in range(3): + if i == 0: + self.main_convs.append( + ConvModule( + in_channels, + middle_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + else: + self.main_convs.append( + ConvModule( + middle_channels, + middle_channels, + kernel_sizes, + padding=(kernel_sizes - 1) // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.final_conv = ConvModule( + middle_channels * 4, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x) -> Tensor: + short_out = self.short_conv(x) + + main_outs = [] + for main_conv in self.main_convs: + main_out = main_conv(x) + main_outs.append(main_out) + x = main_out + + return self.final_conv(torch.cat([*main_outs[::-1], short_out], dim=1)) + + +@MODELS.register_module() +class SPPFCSPBlock(BaseModule): + """Spatial pyramid pooling - Fast (SPPF) layer with CSP for + YOLOv7 + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expand_ratio (float): Expand ratio of SPPCSPBlock. + Defaults to 0.5. + kernel_sizes (int, tuple[int]): Sequential or number of kernel + sizes of pooling layers. Defaults to 5. + is_tiny_version (bool): Is tiny version of SPPFCSPBlock. If True, + it means it is a yolov7 tiny model. Defaults to False. + conv_cfg (dict): Config dict for convolution layer. Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + expand_ratio: float = 0.5, + kernel_sizes: Union[int, Sequence[int]] = 5, + is_tiny_version: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg=init_cfg) + self.is_tiny_version = is_tiny_version + + mid_channels = int(2 * out_channels * expand_ratio) + + if is_tiny_version: + self.main_layers = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.main_layers = nn.Sequential( + ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + mid_channels, + mid_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + mid_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.kernel_sizes = kernel_sizes + if isinstance(kernel_sizes, int): + self.poolings = nn.MaxPool2d( + kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2) + else: + self.poolings = nn.ModuleList([ + nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) + for ks in kernel_sizes + ]) + + if is_tiny_version: + self.fuse_layers = ConvModule( + 4 * mid_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.fuse_layers = nn.Sequential( + ConvModule( + 4 * mid_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + mid_channels, + mid_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.short_layer = ConvModule( + in_channels, + mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.final_conv = ConvModule( + 2 * mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + x1 = self.main_layers(x) + if isinstance(self.kernel_sizes, int): + y1 = self.poolings(x1) + y2 = self.poolings(y1) + concat_list = [x1] + [y1, y2, self.poolings(y2)] + if self.is_tiny_version: + x1 = self.fuse_layers(torch.cat(concat_list[::-1], 1)) + else: + x1 = self.fuse_layers(torch.cat(concat_list, 1)) + else: + concat_list = [x1] + [m(x1) for m in self.poolings] + if self.is_tiny_version: + x1 = self.fuse_layers(torch.cat(concat_list[::-1], 1)) + else: + x1 = self.fuse_layers(torch.cat(concat_list, 1)) + + x2 = self.short_layer(x) + return self.final_conv(torch.cat((x1, x2), dim=1)) + + +class ImplicitA(nn.Module): + """Implicit add layer in YOLOv7. + + Args: + in_channels (int): The input channels of this Module. + mean (float): Mean value of implicit module. Defaults to 0. + std (float): Std value of implicit module. Defaults to 0.02 + """ + + def __init__(self, in_channels: int, mean: float = 0., std: float = .02): + super().__init__() + self.implicit = nn.Parameter(torch.zeros(1, in_channels, 1, 1)) + nn.init.normal_(self.implicit, mean=mean, std=std) + + def forward(self, x): + """Forward process + Args: + x (Tensor): The input tensor. + """ + return self.implicit + x + + +class ImplicitM(nn.Module): + """Implicit multiplier layer in YOLOv7. + + Args: + in_channels (int): The input channels of this Module. 
+ mean (float): Mean value of implicit module. Defaults to 1. + std (float): Std value of implicit module. Defaults to 0.02. + """ + + def __init__(self, in_channels: int, mean: float = 1., std: float = .02): + super().__init__() + self.implicit = nn.Parameter(torch.ones(1, in_channels, 1, 1)) + nn.init.normal_(self.implicit, mean=mean, std=std) + + def forward(self, x): + """Forward process + Args: + x (Tensor): The input tensor. + """ + return self.implicit * x + + +@MODELS.register_module() +class PPYOLOEBasicBlock(nn.Module): + """PPYOLOE Backbone BasicBlock. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.1, eps=1e-5). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + shortcut (bool): Whether to add inputs and outputs together + at the end of this layer. Defaults to True. + use_alpha (bool): Whether to use `alpha` parameter at 1x1 conv. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + shortcut: bool = True, + use_alpha: bool = False): + super().__init__() + assert act_cfg is None or isinstance(act_cfg, dict) + self.conv1 = ConvModule( + in_channels, + out_channels, + 3, + stride=1, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv2 = RepVGGBlock( + out_channels, + out_channels, + use_alpha=use_alpha, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + use_bn_first=False) + self.shortcut = shortcut + + def forward(self, x: Tensor) -> Tensor: + """Forward process. + Args: + inputs (Tensor): The input tensor. + + Returns: + Tensor: The output tensor. + """ + y = self.conv1(x) + y = self.conv2(y) + if self.shortcut: + return x + y + else: + return y + + +class CSPResLayer(nn.Module): + """PPYOLOE Backbone Stage. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + num_block (int): Number of blocks in this stage. + block_cfg (dict): Config dict for block. Default config is + suitable for PPYOLOE+ backbone. And in PPYOLOE neck, + block_cfg is set to dict(type='PPYOLOEBasicBlock', + shortcut=False, use_alpha=False). Defaults to + dict(type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True). + stride (int): Stride of the convolution. In backbone, the stride + must be set to 2. In neck, the stride must be set to 1. + Defaults to 1. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.1, eps=1e-5). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + attention_cfg (dict, optional): Config dict for `EffectiveSELayer`. + Defaults to dict(type='EffectiveSELayer', + act_cfg=dict(type='HSigmoid')). + use_spp (bool): Whether to use `SPPFBottleneck` layer. + Defaults to False. 
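`ImplicitA` and `ImplicitM` above are simply learned additive and multiplicative priors. A hypothetical usage sketch in the spirit of YOLOv7 heads (not code from the upstream file): apply the additive prior before a convolution and the multiplicative prior after it.
```
import torch
from mmyolo.models.layers import ImplicitA, ImplicitM

conv = torch.nn.Conv2d(16, 16, kernel_size=1)
ia, im = ImplicitA(16), ImplicitM(16)

x = torch.randn(1, 16, 8, 8)
y = im(conv(ia(x)))   # learned add before conv, learned multiply after
print(y.shape)        # torch.Size([1, 16, 8, 8])
```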
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + num_block: int, + block_cfg: ConfigType = dict( + type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True), + stride: int = 1, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + attention_cfg: OptMultiConfig = dict( + type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')), + use_spp: bool = False): + super().__init__() + + self.num_block = num_block + self.block_cfg = block_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.use_spp = use_spp + assert attention_cfg is None or isinstance(attention_cfg, dict) + + if stride == 2: + conv1_in_channels = conv2_in_channels = conv3_in_channels = ( + in_channels + out_channels) // 2 + blocks_channels = conv1_in_channels // 2 + self.conv_down = ConvModule( + in_channels, + conv1_in_channels, + 3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + conv1_in_channels = conv2_in_channels = in_channels + conv3_in_channels = out_channels + blocks_channels = out_channels // 2 + self.conv_down = None + + self.conv1 = ConvModule( + conv1_in_channels, + blocks_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv2 = ConvModule( + conv2_in_channels, + blocks_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.blocks = self.build_blocks_layer(blocks_channels) + + self.conv3 = ConvModule( + conv3_in_channels, + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if attention_cfg: + attention_cfg = attention_cfg.copy() + attention_cfg['channels'] = blocks_channels * 2 + self.attn = MODELS.build(attention_cfg) + else: + self.attn = None + + def build_blocks_layer(self, blocks_channels: int) -> nn.Module: + """Build blocks layer. + + Args: + blocks_channels: The channels of this Module. + """ + blocks = nn.Sequential() + block_cfg = self.block_cfg.copy() + block_cfg.update( + dict(in_channels=blocks_channels, out_channels=blocks_channels)) + block_cfg.setdefault('norm_cfg', self.norm_cfg) + block_cfg.setdefault('act_cfg', self.act_cfg) + + for i in range(self.num_block): + blocks.add_module(str(i), MODELS.build(block_cfg)) + + if i == (self.num_block - 1) // 2 and self.use_spp: + blocks.add_module( + 'spp', + SPPFBottleneck( + blocks_channels, + blocks_channels, + kernel_sizes=[5, 9, 13], + use_conv_first=False, + conv_cfg=None, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + return blocks + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + if self.conv_down is not None: + x = self.conv_down(x) + y1 = self.conv1(x) + y2 = self.blocks(self.conv2(x)) + y = torch.cat([y1, y2], axis=1) + if self.attn is not None: + y = self.attn(y) + y = self.conv3(y) + return y + + +@MODELS.register_module() +class RepStageBlock(nn.Module): + """RepStageBlock is a stage block with rep-style basic block. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + num_blocks (int, tuple[int]): Number of blocks. Defaults to 1. + bottle_block (nn.Module): Basic unit of RepStage. + Defaults to RepVGGBlock. + block_cfg (ConfigType): Config of RepStage. + Defaults to 'RepVGGBlock'. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + num_blocks: int = 1, + bottle_block: nn.Module = RepVGGBlock, + block_cfg: ConfigType = dict(type='RepVGGBlock')): + super().__init__() + block_cfg = block_cfg.copy() + + block_cfg.update( + dict(in_channels=in_channels, out_channels=out_channels)) + + self.conv1 = MODELS.build(block_cfg) + + block_cfg.update( + dict(in_channels=out_channels, out_channels=out_channels)) + + self.block = None + if num_blocks > 1: + self.block = nn.Sequential(*(MODELS.build(block_cfg) + for _ in range(num_blocks - 1))) + + if bottle_block == BottleRep: + self.conv1 = BottleRep( + in_channels, + out_channels, + block_cfg=block_cfg, + adaptive_weight=True) + num_blocks = num_blocks // 2 + self.block = None + if num_blocks > 1: + self.block = nn.Sequential(*(BottleRep( + out_channels, + out_channels, + block_cfg=block_cfg, + adaptive_weight=True) for _ in range(num_blocks - 1))) + + def forward(self, x: Tensor) -> Tensor: + """Forward process. + + Args: + x (Tensor): The input tensor. + + Returns: + Tensor: The output tensor. + """ + x = self.conv1(x) + if self.block is not None: + x = self.block(x) + return x + + +class DarknetBottleneck(MMDET_DarknetBottleneck): + """The basic bottleneck block used in Darknet. + + Each ResBlock consists of two ConvModules and the input is added to the + final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. + The first convLayer has filter size of k1Xk1 and the second one has the + filter size of k2Xk2. + + Note: + This DarknetBottleneck is little different from MMDet's, we can + change the kernel size and padding for each conv. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expansion (float): The kernel size for hidden channel. + Defaults to 0.5. + kernel_size (Sequence[int]): The kernel size of the convolution. + Defaults to (1, 3). + padding (Sequence[int]): The padding size of the convolution. + Defaults to (0, 1). + add_identity (bool): Whether to add identity to the out. + Defaults to True + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish'). 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + expansion: float = 0.5, + kernel_size: Sequence[int] = (1, 3), + padding: Sequence[int] = (0, 1), + add_identity: bool = True, + use_depthwise: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(in_channels, out_channels, init_cfg=init_cfg) + hidden_channels = int(out_channels * expansion) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + assert isinstance(kernel_size, Sequence) and len(kernel_size) == 2 + + self.conv1 = ConvModule( + in_channels, + hidden_channels, + kernel_size[0], + padding=padding[0], + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = conv( + hidden_channels, + out_channels, + kernel_size[1], + stride=1, + padding=padding[1], + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.add_identity = \ + add_identity and in_channels == out_channels + + +class CSPLayerWithTwoConv(BaseModule): + """Cross Stage Partial Layer with 2 convolutions. + + Args: + in_channels (int): The input channels of the CSP layer. + out_channels (int): The output channels of the CSP layer. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Defaults to 0.5. + num_blocks (int): Number of blocks. Defaults to 1 + add_identity (bool): Whether to add identity in blocks. + Defaults to True. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (:obj:`ConfigDict` or dict or list[dict] or + list[:obj:`ConfigDict`], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + expand_ratio: float = 0.5, + num_blocks: int = 1, + add_identity: bool = True, # shortcut + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + + self.mid_channels = int(out_channels * expand_ratio) + self.main_conv = ConvModule( + in_channels, + 2 * self.mid_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.final_conv = ConvModule( + (2 + num_blocks) * self.mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.blocks = nn.ModuleList( + DarknetBottleneck( + self.mid_channels, + self.mid_channels, + expansion=1, + kernel_size=(3, 3), + padding=(1, 1), + add_identity=add_identity, + use_depthwise=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) for _ in range(num_blocks)) + + def forward(self, x: Tensor) -> Tensor: + """Forward process.""" + x_main = self.main_conv(x) + x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1)) + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + return self.final_conv(torch.cat(x_main, 1)) + + +class BiFusion(nn.Module): + """BiFusion Block in YOLOv6. + + BiFusion fuses current-, high- and low-level features. + Compared with concatenation in PAN, it fuses an extra low-level feature. 
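The split/extend/cat pattern in `CSPLayerWithTwoConv.forward` above has simple channel bookkeeping: `main_conv` yields `2 * mid` channels, every bottleneck appends another `mid`, and `final_conv` therefore consumes `(2 + num_blocks) * mid`. A short usage sketch (illustrative sizes, assuming the vendored `mmyolo` package is importable):
```
import torch
from mmyolo.models.layers import CSPLayerWithTwoConv

layer = CSPLayerWithTwoConv(64, 128, expand_ratio=0.5, num_blocks=2)
x = torch.randn(1, 64, 40, 40)
print(layer(x).shape)  # torch.Size([1, 128, 40, 40])
```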
+ + Args: + in_channels0 (int): The channels of current-level feature. + in_channels1 (int): The input channels of lower-level feature. + out_channels (int): The out channels of the BiFusion module. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels0: int, + in_channels1: int, + out_channels: int, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True)): + super().__init__() + self.conv1 = ConvModule( + in_channels0, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + in_channels1, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv3 = ConvModule( + out_channels * 3, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.upsample = nn.ConvTranspose2d( + out_channels, out_channels, kernel_size=2, stride=2, bias=True) + self.downsample = ConvModule( + out_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: List[torch.Tensor]) -> Tensor: + """Forward process + Args: + x (List[torch.Tensor]): The tensor list of length 3. + x[0]: The high-level feature. + x[1]: The current-level feature. + x[2]: The low-level feature. + """ + x0 = self.upsample(x[0]) + x1 = self.conv1(x[1]) + x2 = self.downsample(self.conv2(x[2])) + return self.conv3(torch.cat((x0, x1, x2), dim=1)) + + +class CSPSPPFBottleneck(BaseModule): + """The SPPF block having a CSP-like version in YOLOv6 3.0. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + kernel_sizes (int, tuple[int]): Sequential or number of kernel + sizes of pooling layers. Defaults to 5. + use_conv_first (bool): Whether to use conv before pooling layer. + In YOLOv5 and YOLOX, the para set to True. + In PPYOLOE, the para set to False. + Defaults to True. + mid_channels_scale (float): Channel multiplier, multiply in_channels + by this amount to get mid_channels. This parameter is valid only + when use_conv_fist=True.Defaults to 0.5. + conv_cfg (dict): Config dict for convolution layer. Defaults to None. + which means using conv2d. Defaults to None. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
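A shape sketch for `BiFusion` above (hypothetical sizes, assuming the vendored `mmyolo` package is importable): the high-level input must already carry `out_channels` channels for the transposed convolution, while the current- and low-level inputs are projected by 1×1 convolutions before fusion at the current stride.
```
import torch
from mmyolo.models.layers import BiFusion

fusion = BiFusion(in_channels0=128, in_channels1=64, out_channels=96)
x_high = torch.randn(1, 96, 10, 10)   # upsampled x2 -> 20x20
x_cur = torch.randn(1, 128, 20, 20)   # 1x1 conv -> 96 channels
x_low = torch.randn(1, 64, 40, 40)    # stride-2 conv -> 20x20, 96 channels

print(fusion([x_high, x_cur, x_low]).shape)  # torch.Size([1, 96, 20, 20])
```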
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_sizes: Union[int, Sequence[int]] = 5, + use_conv_first: bool = True, + mid_channels_scale: float = 0.5, + conv_cfg: ConfigType = None, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + + if use_conv_first: + mid_channels = int(in_channels * mid_channels_scale) + self.conv1 = ConvModule( + in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv3 = ConvModule( + mid_channels, + mid_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv4 = ConvModule( + mid_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + mid_channels = in_channels + self.conv1 = None + self.conv3 = None + self.conv4 = None + + self.conv2 = ConvModule( + in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.kernel_sizes = kernel_sizes + + if isinstance(kernel_sizes, int): + self.poolings = nn.MaxPool2d( + kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2) + conv2_in_channels = mid_channels * 4 + else: + self.poolings = nn.ModuleList([ + nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) + for ks in kernel_sizes + ]) + conv2_in_channels = mid_channels * (len(kernel_sizes) + 1) + + self.conv5 = ConvModule( + conv2_in_channels, + mid_channels, + 1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv6 = ConvModule( + mid_channels, + mid_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv7 = ConvModule( + mid_channels * 2, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x: Tensor) -> Tensor: + """Forward process + Args: + x (Tensor): The input tensor. + """ + x0 = self.conv4(self.conv3(self.conv1(x))) if self.conv1 else x + y = self.conv2(x) + + if isinstance(self.kernel_sizes, int): + x1 = self.poolings(x0) + x2 = self.poolings(x1) + x3 = torch.cat([x0, x1, x2, self.poolings(x2)], dim=1) + else: + x3 = torch.cat( + [x0] + [pooling(x0) for pooling in self.poolings], dim=1) + + x3 = self.conv6(self.conv5(x3)) + x = self.conv7(torch.cat([y, x3], dim=1)) + return x diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c89fe4dc45ace2583241cff11542d1fbf8bdc73a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .iou_loss import IoULoss, bbox_overlaps +from .oks_loss import OksLoss + +__all__ = ['IoULoss', 'bbox_overlaps', 'OksLoss'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/iou_loss.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/iou_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..e3d3dc40ef3e678989db85ee8cfd0035a26a9f19 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/iou_loss.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmdet.models.losses.utils import weight_reduce_loss +from mmdet.structures.bbox import HorizontalBoxes + +from mmyolo.registry import MODELS + + +def bbox_overlaps(pred: torch.Tensor, + target: torch.Tensor, + iou_mode: str = 'ciou', + bbox_format: str = 'xywh', + siou_theta: float = 4.0, + eps: float = 1e-7) -> torch.Tensor: + r"""Calculate overlap between two set of bboxes. + `Implementation of paper `Enhancing Geometric Factors into + Model Learning and Inference for Object Detection and Instance + Segmentation `_. + + In the CIoU implementation of YOLOv5 and MMDetection, there is a slight + difference in the way the alpha parameter is computed. + + mmdet version: + alpha = (ious > 0.5).float() * v / (1 - ious + v) + YOLOv5 version: + alpha = v / (v - ious + (1 + eps) + + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2) + or (x, y, w, h),shape (n, 4). + target (Tensor): Corresponding gt bboxes, shape (n, 4). + iou_mode (str): Options are ('iou', 'ciou', 'giou', 'siou'). + Defaults to "ciou". + bbox_format (str): Options are "xywh" and "xyxy". + Defaults to "xywh". + siou_theta (float): siou_theta for SIoU when calculate shape cost. + Defaults to 4.0. + eps (float): Eps to avoid log(0). + + Returns: + Tensor: shape (n, ). + """ + assert iou_mode in ('iou', 'ciou', 'giou', 'siou') + assert bbox_format in ('xyxy', 'xywh') + if bbox_format == 'xywh': + pred = HorizontalBoxes.cxcywh_to_xyxy(pred) + target = HorizontalBoxes.cxcywh_to_xyxy(target) + + bbox1_x1, bbox1_y1 = pred[..., 0], pred[..., 1] + bbox1_x2, bbox1_y2 = pred[..., 2], pred[..., 3] + bbox2_x1, bbox2_y1 = target[..., 0], target[..., 1] + bbox2_x2, bbox2_y2 = target[..., 2], target[..., 3] + + # Overlap + overlap = (torch.min(bbox1_x2, bbox2_x2) - + torch.max(bbox1_x1, bbox2_x1)).clamp(0) * \ + (torch.min(bbox1_y2, bbox2_y2) - + torch.max(bbox1_y1, bbox2_y1)).clamp(0) + + # Union + w1, h1 = bbox1_x2 - bbox1_x1, bbox1_y2 - bbox1_y1 + w2, h2 = bbox2_x2 - bbox2_x1, bbox2_y2 - bbox2_y1 + union = (w1 * h1) + (w2 * h2) - overlap + eps + + h1 = bbox1_y2 - bbox1_y1 + eps + h2 = bbox2_y2 - bbox2_y1 + eps + + # IoU + ious = overlap / union + + # enclose area + enclose_x1y1 = torch.min(pred[..., :2], target[..., :2]) + enclose_x2y2 = torch.max(pred[..., 2:], target[..., 2:]) + enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) + + enclose_w = enclose_wh[..., 0] # cw + enclose_h = enclose_wh[..., 1] # ch + + if iou_mode == 'ciou': + # CIoU = IoU - ( (ρ^2(b_pred,b_gt) / c^2) + (alpha x v) ) + + # calculate enclose area (c^2) + enclose_area = enclose_w**2 + enclose_h**2 + eps + + # calculate ρ^2(b_pred,b_gt): + # euclidean distance between b_pred(bbox2) and b_gt(bbox1) + # center point, because bbox format is xyxy -> left-top xy and + # right-bottom xy, so need to / 4 to get center point. 
+ rho2_left_item = ((bbox2_x1 + bbox2_x2) - (bbox1_x1 + bbox1_x2))**2 / 4 + rho2_right_item = ((bbox2_y1 + bbox2_y2) - + (bbox1_y1 + bbox1_y2))**2 / 4 + rho2 = rho2_left_item + rho2_right_item # rho^2 (ρ^2) + + # Width and height ratio (v) + wh_ratio = (4 / (math.pi**2)) * torch.pow( + torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + + with torch.no_grad(): + alpha = wh_ratio / (wh_ratio - ious + (1 + eps)) + + # CIoU + ious = ious - ((rho2 / enclose_area) + (alpha * wh_ratio)) + + elif iou_mode == 'giou': + # GIoU = IoU - ( (A_c - union) / A_c ) + convex_area = enclose_w * enclose_h + eps # convex area (A_c) + ious = ious - (convex_area - union) / convex_area + + elif iou_mode == 'siou': + # SIoU: https://arxiv.org/pdf/2205.12740.pdf + # SIoU = IoU - ( (Distance Cost + Shape Cost) / 2 ) + + # calculate sigma (σ): + # euclidean distance between bbox2(pred) and bbox1(gt) center point, + # sigma_cw = b_cx_gt - b_cx + sigma_cw = (bbox2_x1 + bbox2_x2) / 2 - (bbox1_x1 + bbox1_x2) / 2 + eps + # sigma_ch = b_cy_gt - b_cy + sigma_ch = (bbox2_y1 + bbox2_y2) / 2 - (bbox1_y1 + bbox1_y2) / 2 + eps + # sigma = √( (sigma_cw ** 2) - (sigma_ch ** 2) ) + sigma = torch.pow(sigma_cw**2 + sigma_ch**2, 0.5) + + # choose minimize alpha, sin(alpha) + sin_alpha = torch.abs(sigma_ch) / sigma + sin_beta = torch.abs(sigma_cw) / sigma + sin_alpha = torch.where(sin_alpha <= math.sin(math.pi / 4), sin_alpha, + sin_beta) + + # Angle cost = 1 - 2 * ( sin^2 ( arcsin(x) - (pi / 4) ) ) + angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2) + + # Distance cost = Σ_(t=x,y) (1 - e ^ (- γ ρ_t)) + rho_x = (sigma_cw / enclose_w)**2 # ρ_x + rho_y = (sigma_ch / enclose_h)**2 # ρ_y + gamma = 2 - angle_cost # γ + distance_cost = (1 - torch.exp(-1 * gamma * rho_x)) + ( + 1 - torch.exp(-1 * gamma * rho_y)) + + # Shape cost = Ω = Σ_(t=w,h) ( ( 1 - ( e ^ (-ω_t) ) ) ^ θ ) + omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2) # ω_w + omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2) # ω_h + shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), + siou_theta) + torch.pow( + 1 - torch.exp(-1 * omiga_h), siou_theta) + + ious = ious - ((distance_cost + shape_cost) * 0.5) + + return ious.clamp(min=-1.0, max=1.0) + + +@MODELS.register_module() +class IoULoss(nn.Module): + """IoULoss. + + Computing the IoU loss between a set of predicted bboxes and target bboxes. + Args: + iou_mode (str): Options are "ciou". + Defaults to "ciou". + bbox_format (str): Options are "xywh" and "xyxy". + Defaults to "xywh". + eps (float): Eps to avoid log(0). + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Weight of loss. + return_iou (bool): If True, return loss and iou. + """ + + def __init__(self, + iou_mode: str = 'ciou', + bbox_format: str = 'xywh', + eps: float = 1e-7, + reduction: str = 'mean', + loss_weight: float = 1.0, + return_iou: bool = True): + super().__init__() + assert bbox_format in ('xywh', 'xyxy') + assert iou_mode in ('ciou', 'siou', 'giou') + self.iou_mode = iou_mode + self.bbox_format = bbox_format + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + self.return_iou = return_iou + + def forward( + self, + pred: torch.Tensor, + target: torch.Tensor, + weight: Optional[torch.Tensor] = None, + avg_factor: Optional[float] = None, + reduction_override: Optional[Union[str, bool]] = None + ) -> Tuple[Union[torch.Tensor, torch.Tensor], torch.Tensor]: + """Forward function. + + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2) + or (x, y, w, h),shape (n, 4). 
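A brief usage sketch of `bbox_overlaps` above (illustrative boxes only, assuming the vendored `mmyolo` package is importable), contrasting plain IoU with CIoU, which additionally subtracts the center-distance and aspect-ratio penalties described in the docstring:
```
import torch
from mmyolo.models.losses import bbox_overlaps

pred = torch.tensor([[10., 10., 50., 50.],
                     [20., 20., 60., 80.]])
target = torch.tensor([[12., 12., 48., 52.],
                       [0., 0., 40., 40.]])

print(bbox_overlaps(pred, target, iou_mode='iou', bbox_format='xyxy'))
print(bbox_overlaps(pred, target, iou_mode='ciou', bbox_format='xyxy'))
```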
+ target (Tensor): Corresponding gt bboxes, shape (n, 4). + weight (Tensor, optional): Element-wise weights. + avg_factor (float, optional): Average factor when computing the + mean of losses. + reduction_override (str, bool, optional): Same as built-in losses + of PyTorch. Defaults to None. + Returns: + loss or tuple(loss, iou): + """ + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if weight is not None and weight.dim() > 1: + weight = weight.mean(-1) + + iou = bbox_overlaps( + pred, + target, + iou_mode=self.iou_mode, + bbox_format=self.bbox_format, + eps=self.eps) + loss = self.loss_weight * weight_reduce_loss(1.0 - iou, weight, + reduction, avg_factor) + + if self.return_iou: + return loss, iou + else: + return loss diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/oks_loss.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/oks_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..62c63422b3d13ade5164f23a9537a01847ff358d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/losses/oks_loss.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +from torch import Tensor + +from mmyolo.registry import MODELS + +try: + from mmpose.datasets.datasets.utils import parse_pose_metainfo +except ImportError: + parse_pose_metainfo = None + + +@MODELS.register_module() +class OksLoss(nn.Module): + """A PyTorch implementation of the Object Keypoint Similarity (OKS) loss as + described in the paper "YOLO-Pose: Enhancing YOLO for Multi Person Pose + Estimation Using Object Keypoint Similarity Loss" by Debapriya et al. + + (2022). + The OKS loss is used for keypoint-based object recognition and consists + of a measure of the similarity between predicted and ground truth + keypoint locations, adjusted by the size of the object in the image. + The loss function takes as input the predicted keypoint locations, the + ground truth keypoint locations, a mask indicating which keypoints are + valid, and bounding boxes for the objects. + Args: + metainfo (Optional[str]): Path to a JSON file containing information + about the dataset's annotations. + loss_weight (float): Weight for the loss. + """ + + def __init__(self, + metainfo: Optional[str] = None, + loss_weight: float = 1.0): + super().__init__() + + if metainfo is not None: + if parse_pose_metainfo is None: + raise ImportError( + 'Please run "mim install -r requirements/mmpose.txt" ' + 'to install mmpose first for OksLossn.') + metainfo = parse_pose_metainfo(dict(from_file=metainfo)) + sigmas = metainfo.get('sigmas', None) + if sigmas is not None: + self.register_buffer('sigmas', torch.as_tensor(sigmas)) + self.loss_weight = loss_weight + + def forward(self, + output: Tensor, + target: Tensor, + target_weights: Tensor, + bboxes: Optional[Tensor] = None) -> Tensor: + oks = self.compute_oks(output, target, target_weights, bboxes) + loss = 1 - oks + return loss * self.loss_weight + + def compute_oks(self, + output: Tensor, + target: Tensor, + target_weights: Tensor, + bboxes: Optional[Tensor] = None) -> Tensor: + """Calculates the OKS loss. 
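A plain-tensor sketch that mirrors the `compute_oks` math shown just below (illustrative only: no dataset sigmas, so every keypoint shares the same tolerance; shapes are N x K x 2 keypoints and N x 4 xyxy boxes):
```
import torch

output = torch.rand(2, 17, 2) * 100
target = output + torch.randn(2, 17, 2)        # small perturbation of the prediction
target_weights = torch.ones(2, 17)             # all keypoints marked valid
bboxes = torch.tensor([[0., 0., 100., 100.]] * 2)

dist = torch.norm(output - target, dim=-1)                       # per-keypoint error
area = torch.norm(bboxes[..., 2:] - bboxes[..., :2], dim=-1)     # object scale
dist = dist / area.clip(min=1e-8).unsqueeze(-1)
oks = (torch.exp(-dist.pow(2) / 2) * target_weights).sum(-1) \
    / target_weights.sum(-1).clip(min=1e-8)
print(oks)  # values close to 1 mean near-perfect keypoint predictions
```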
+ + Args: + output (Tensor): Predicted keypoints in shape N x k x 2, where N + is batch size, k is the number of keypoints, and 2 are the + xy coordinates. + target (Tensor): Ground truth keypoints in the same shape as + output. + target_weights (Tensor): Mask of valid keypoints in shape N x k, + with 1 for valid and 0 for invalid. + bboxes (Optional[Tensor]): Bounding boxes in shape N x 4, + where 4 are the xyxy coordinates. + Returns: + Tensor: The calculated OKS loss. + """ + + dist = torch.norm(output - target, dim=-1) + + if hasattr(self, 'sigmas'): + sigmas = self.sigmas.reshape(*((1, ) * (dist.ndim - 1)), -1) + dist = dist / sigmas + if bboxes is not None: + area = torch.norm(bboxes[..., 2:] - bboxes[..., :2], dim=-1) + dist = dist / area.clip(min=1e-8).unsqueeze(-1) + + return (torch.exp(-dist.pow(2) / 2) * target_weights).sum( + dim=-1) / target_weights.sum(dim=-1).clip(min=1e-8) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..159fae8d6e248330e49919420bf82154d905ad6c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_yolo_neck import BaseYOLONeck +from .cspnext_pafpn import CSPNeXtPAFPN +from .ppyoloe_csppan import PPYOLOECSPPAFPN +from .yolov5_pafpn import YOLOv5PAFPN +from .yolov6_pafpn import (YOLOv6CSPRepBiPAFPN, YOLOv6CSPRepPAFPN, + YOLOv6RepBiPAFPN, YOLOv6RepPAFPN) +from .yolov7_pafpn import YOLOv7PAFPN +from .yolov8_pafpn import YOLOv8PAFPN +from .yolox_pafpn import YOLOXPAFPN + +__all__ = [ + 'YOLOv5PAFPN', 'BaseYOLONeck', 'YOLOv6RepPAFPN', 'YOLOXPAFPN', + 'CSPNeXtPAFPN', 'YOLOv7PAFPN', 'PPYOLOECSPPAFPN', 'YOLOv6CSPRepPAFPN', + 'YOLOv8PAFPN', 'YOLOv6RepBiPAFPN', 'YOLOv6CSPRepBiPAFPN' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/base_yolo_neck.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/base_yolo_neck.py new file mode 100644 index 0000000000000000000000000000000000000000..8825b7634f54df624f56d0cd0beef4d0e4658788 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/base_yolo_neck.py @@ -0,0 +1,261 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import List, Union + +import torch +import torch.nn as nn +from mmdet.utils import ConfigType, OptMultiConfig +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class BaseYOLONeck(BaseModule, metaclass=ABCMeta): + """Base neck used in YOLO series. + + .. 
code:: text + + P5 neck model structure diagram + +--------+ +-------+ + |top_down|----------+--------->| out |---> output0 + | layer1 | | | layer0| + +--------+ | +-------+ + stride=8 ^ | + idx=0 +------+ +--------+ | + -----> |reduce|--->| cat | | + |layer0| +--------+ | + +------+ ^ v + +--------+ +-----------+ + |upsample| |downsample | + | layer1 | | layer0 | + +--------+ +-----------+ + ^ | + +--------+ v + |top_down| +-----------+ + | layer2 |--->| cat | + +--------+ +-----------+ + stride=16 ^ v + idx=1 +------+ +--------+ +-----------+ +-------+ + -----> |reduce|--->| cat | | bottom_up |--->| out |---> output1 + |layer1| +--------+ | layer0 | | layer1| + +------+ ^ +-----------+ +-------+ + | v + +--------+ +-----------+ + |upsample| |downsample | + | layer2 | | layer1 | + stride=32 +--------+ +-----------+ + idx=2 +------+ ^ v + -----> |reduce| | +-----------+ + |layer2|---------+------->| cat | + +------+ +-----------+ + v + +-----------+ +-------+ + | bottom_up |--->| out |---> output2 + | layer1 | | layer2| + +-----------+ +-------+ + + .. code:: text + + P6 neck model structure diagram + +--------+ +-------+ + |top_down|----------+--------->| out |---> output0 + | layer1 | | | layer0| + +--------+ | +-------+ + stride=8 ^ | + idx=0 +------+ +--------+ | + -----> |reduce|--->| cat | | + |layer0| +--------+ | + +------+ ^ v + +--------+ +-----------+ + |upsample| |downsample | + | layer1 | | layer0 | + +--------+ +-----------+ + ^ | + +--------+ v + |top_down| +-----------+ + | layer2 |--->| cat | + +--------+ +-----------+ + stride=16 ^ v + idx=1 +------+ +--------+ +-----------+ +-------+ + -----> |reduce|--->| cat | | bottom_up |--->| out |---> output1 + |layer1| +--------+ | layer0 | | layer1| + +------+ ^ +-----------+ +-------+ + | v + +--------+ +-----------+ + |upsample| |downsample | + | layer2 | | layer1 | + +--------+ +-----------+ + ^ | + +--------+ v + |top_down| +-----------+ + | layer3 |--->| cat | + +--------+ +-----------+ + stride=32 ^ v + idx=2 +------+ +--------+ +-----------+ +-------+ + -----> |reduce|--->| cat | | bottom_up |--->| out |---> output2 + |layer2| +--------+ | layer1 | | layer2| + +------+ ^ +-----------+ +-------+ + | v + +--------+ +-----------+ + |upsample| |downsample | + | layer3 | | layer2 | + +--------+ +-----------+ + stride=64 ^ v + idx=3 +------+ | +-----------+ + -----> |reduce|---------+------->| cat | + |layer3| +-----------+ + +------+ v + +-----------+ +-------+ + | bottom_up |--->| out |---> output3 + | layer2 | | layer3| + +-----------+ +-------+ + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + upsample_feats_cat_first (bool): Whether the output features are + concat first after upsampling in the topdown module. + Defaults to True. Currently only YOLOv7 is false. + freeze_all(bool): Whether to freeze the model. Defaults to False + norm_cfg (dict): Config dict for normalization layer. + Defaults to None. + act_cfg (dict): Config dict for activation layer. + Defaults to None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: List[int], + out_channels: Union[int, List[int]], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + upsample_feats_cat_first: bool = True, + freeze_all: bool = False, + norm_cfg: ConfigType = None, + act_cfg: ConfigType = None, + init_cfg: OptMultiConfig = None, + **kwargs): + super().__init__(init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.deepen_factor = deepen_factor + self.widen_factor = widen_factor + self.upsample_feats_cat_first = upsample_feats_cat_first + self.freeze_all = freeze_all + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.reduce_layers = nn.ModuleList() + for idx in range(len(in_channels)): + self.reduce_layers.append(self.build_reduce_layer(idx)) + + # build top-down blocks + self.upsample_layers = nn.ModuleList() + self.top_down_layers = nn.ModuleList() + for idx in range(len(in_channels) - 1, 0, -1): + self.upsample_layers.append(self.build_upsample_layer(idx)) + self.top_down_layers.append(self.build_top_down_layer(idx)) + + # build bottom-up blocks + self.downsample_layers = nn.ModuleList() + self.bottom_up_layers = nn.ModuleList() + for idx in range(len(in_channels) - 1): + self.downsample_layers.append(self.build_downsample_layer(idx)) + self.bottom_up_layers.append(self.build_bottom_up_layer(idx)) + + self.out_layers = nn.ModuleList() + for idx in range(len(in_channels)): + self.out_layers.append(self.build_out_layer(idx)) + + @abstractmethod + def build_reduce_layer(self, idx: int): + """build reduce layer.""" + pass + + @abstractmethod + def build_upsample_layer(self, idx: int): + """build upsample layer.""" + pass + + @abstractmethod + def build_top_down_layer(self, idx: int): + """build top down layer.""" + pass + + @abstractmethod + def build_downsample_layer(self, idx: int): + """build downsample layer.""" + pass + + @abstractmethod + def build_bottom_up_layer(self, idx: int): + """build bottom up layer.""" + pass + + @abstractmethod + def build_out_layer(self, idx: int): + """build out layer.""" + pass + + def _freeze_all(self): + """Freeze the model.""" + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """Convert the model into training mode while keep the normalization + layer freezed.""" + super().train(mode) + if self.freeze_all: + self._freeze_all() + + def forward(self, inputs: List[torch.Tensor]) -> tuple: + """Forward function.""" + assert len(inputs) == len(self.in_channels) + # reduce layers + reduce_outs = [] + for idx in range(len(self.in_channels)): + reduce_outs.append(self.reduce_layers[idx](inputs[idx])) + + # top-down path + inner_outs = [reduce_outs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_low = reduce_outs[idx - 1] + upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - + idx]( + feat_high) + if self.upsample_feats_cat_first: + top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1) + else: + top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1) + inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx]( + top_down_layer_inputs) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsample_layers[idx](feat_low) + out = self.bottom_up_layers[idx]( + torch.cat([downsample_feat, 
feat_high], 1)) + outs.append(out) + + # out_layers + results = [] + for idx in range(len(self.in_channels)): + results.append(self.out_layers[idx](outs[idx])) + + return tuple(results) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/cspnext_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/cspnext_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..310126f63e12f888daac50ca30674484f7b3a6ec --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/cspnext_pafpn.py @@ -0,0 +1,201 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Sequence + +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmdet.models.backbones.csp_darknet import CSPLayer +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from .base_yolo_neck import BaseYOLONeck + + +@MODELS.register_module() +class CSPNeXtPAFPN(BaseYOLONeck): + """Path Aggregation Network with CSPNeXt blocks. + + Args: + in_channels (Sequence[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. + Defaults to 3. + use_depthwise (bool): Whether to use depthwise separable convolution in + blocks. Defaults to False. + expand_ratio (float): Ratio to adjust the number of channels of the + hidden layer. Defaults to 0.5. + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(scale_factor=2, mode='nearest')` + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='SiLU', inplace=True) + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__( + self, + in_channels: Sequence[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 3, + freeze_all: bool = False, + use_depthwise: bool = False, + expand_ratio: float = 0.5, + upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'), + conv_cfg: bool = None, + norm_cfg: ConfigType = dict(type='BN'), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu') + ) -> None: + self.num_csp_blocks = round(num_csp_blocks * deepen_factor) + self.conv = DepthwiseSeparableConvModule \ + if use_depthwise else ConvModule + self.upsample_cfg = upsample_cfg + self.expand_ratio = expand_ratio + self.conv_cfg = conv_cfg + + super().__init__( + in_channels=[ + int(channel * widen_factor) for channel in in_channels + ], + out_channels=int(out_channels * widen_factor), + deepen_factor=deepen_factor, + widen_factor=widen_factor, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. 
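# A toy subclass sketch, only to illustrate the contract of BaseYOLONeck above:
# a concrete neck supplies the six build_* hooks and inherits the top-down /
# bottom-up wiring of forward(). The layers below are deliberately trivial
# (channels from the concatenations are left unfused), so this is a sketch for
# illustration, not a usable neck; real subclasses such as YOLOv5PAFPN fuse
# channels with CSP blocks. Assumes mmyolo is installed.
import torch
import torch.nn as nn

from mmyolo.models.necks import BaseYOLONeck


class ToyNeck(BaseYOLONeck):
    """Minimal neck: identity fusion, nearest upsample, max-pool downsample."""

    def build_reduce_layer(self, idx: int) -> nn.Module:
        return nn.Identity()

    def build_upsample_layer(self, idx: int) -> nn.Module:
        return nn.Upsample(scale_factor=2, mode='nearest')

    def build_top_down_layer(self, idx: int) -> nn.Module:
        return nn.Identity()  # keeps the concatenated channels as-is

    def build_downsample_layer(self, idx: int) -> nn.Module:
        return nn.MaxPool2d(kernel_size=2, stride=2)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        return nn.Identity()

    def build_out_layer(self, idx: int) -> nn.Module:
        return nn.Identity()


feats = [
    torch.rand(1, 64, 80, 80),   # stride 8
    torch.rand(1, 128, 40, 40),  # stride 16
    torch.rand(1, 256, 20, 20),  # stride 32
]
neck = ToyNeck(in_channels=[64, 128, 256], out_channels=64)
outs = neck(feats)
print([o.shape for o in outs])   # three maps at strides 8 / 16 / 32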
+ """ + if idx == len(self.in_channels) - 1: + layer = self.conv( + self.in_channels[idx], + self.in_channels[idx - 1], + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + layer = nn.Identity() + + return layer + + def build_upsample_layer(self, *args, **kwargs) -> nn.Module: + """build upsample layer.""" + return nn.Upsample(**self.upsample_cfg) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. + """ + if idx == 1: + return CSPLayer( + self.in_channels[idx - 1] * 2, + self.in_channels[idx - 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + use_cspnext_block=True, + expand_ratio=self.expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + return nn.Sequential( + CSPLayer( + self.in_channels[idx - 1] * 2, + self.in_channels[idx - 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + use_cspnext_block=True, + expand_ratio=self.expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + self.conv( + self.in_channels[idx - 1], + self.in_channels[idx - 2], + kernel_size=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The downsample layer. + """ + return self.conv( + self.in_channels[idx], + self.in_channels[idx], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. + """ + return CSPLayer( + self.in_channels[idx] * 2, + self.in_channels[idx + 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + use_cspnext_block=True, + expand_ratio=self.expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_out_layer(self, idx: int) -> nn.Module: + """build out layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The out layer. + """ + return self.conv( + self.in_channels[idx], + self.out_channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/ppyoloe_csppan.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/ppyoloe_csppan.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4ef7200bfc6784a7ce8d92bcfbc46314e518e9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/ppyoloe_csppan.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.models.backbones.csp_resnet import CSPResLayer +from mmyolo.models.necks import BaseYOLONeck +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class PPYOLOECSPPAFPN(BaseYOLONeck): + """CSPPAN in PPYOLOE. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (List[int]): Number of output channels + (used at each scale). + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. 
+ freeze_all(bool): Whether to freeze the model. + num_csplayer (int): Number of `CSPResLayer` in per layer. + Defaults to 1. + num_blocks_per_layer (int): Number of blocks per `CSPResLayer`. + Defaults to 3. + block_cfg (dict): Config dict for block. Defaults to + dict(type='PPYOLOEBasicBlock', shortcut=True, use_alpha=False) + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.1, eps=1e-5). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + drop_block_cfg (dict, optional): Drop block config. + Defaults to None. If you want to use Drop block after + `CSPResLayer`, you can set this para as + dict(type='mmdet.DropBlock', drop_prob=0.1, + block_size=3, warm_iters=0). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + use_spp (bool): Whether to use `SPP` in reduce layer. + Defaults to False. + """ + + def __init__(self, + in_channels: List[int] = [256, 512, 1024], + out_channels: List[int] = [256, 512, 1024], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + freeze_all: bool = False, + num_csplayer: int = 1, + num_blocks_per_layer: int = 3, + block_cfg: ConfigType = dict( + type='PPYOLOEBasicBlock', shortcut=False, + use_alpha=False), + norm_cfg: ConfigType = dict( + type='BN', momentum=0.1, eps=1e-5), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + drop_block_cfg: ConfigType = None, + init_cfg: OptMultiConfig = None, + use_spp: bool = False): + self.block_cfg = block_cfg + self.num_csplayer = num_csplayer + self.num_blocks_per_layer = round(num_blocks_per_layer * deepen_factor) + # Only use spp in last reduce_layer, if use_spp=True. + self.use_spp = use_spp + self.drop_block_cfg = drop_block_cfg + assert drop_block_cfg is None or isinstance(drop_block_cfg, dict) + + super().__init__( + in_channels=[ + int(channel * widen_factor) for channel in in_channels + ], + out_channels=[ + int(channel * widen_factor) for channel in out_channels + ], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int): + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. + """ + if idx == len(self.in_channels) - 1: + # fpn_stage + in_channels = self.in_channels[idx] + out_channels = self.out_channels[idx] + + layer = [ + CSPResLayer( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + num_block=self.num_blocks_per_layer, + block_cfg=self.block_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + attention_cfg=None, + use_spp=self.use_spp) for i in range(self.num_csplayer) + ] + + if self.drop_block_cfg: + layer.append(MODELS.build(self.drop_block_cfg)) + layer = nn.Sequential(*layer) + else: + layer = nn.Identity() + + return layer + + def build_upsample_layer(self, idx: int) -> nn.Module: + """build upsample layer.""" + # fpn_route + in_channels = self.out_channels[idx] + return nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=in_channels // 2, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Upsample(scale_factor=2, mode='nearest')) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. 
+ """ + # fpn_stage + in_channels = self.in_channels[idx - 1] + self.out_channels[idx] // 2 + out_channels = self.out_channels[idx - 1] + + layer = [ + CSPResLayer( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + num_block=self.num_blocks_per_layer, + block_cfg=self.block_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + attention_cfg=None, + use_spp=False) for i in range(self.num_csplayer) + ] + + if self.drop_block_cfg: + layer.append(MODELS.build(self.drop_block_cfg)) + + return nn.Sequential(*layer) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The downsample layer. + """ + # pan_route + return ConvModule( + in_channels=self.out_channels[idx], + out_channels=self.out_channels[idx], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. + """ + # pan_stage + in_channels = self.out_channels[idx + 1] + self.out_channels[idx] + out_channels = self.out_channels[idx + 1] + + layer = [ + CSPResLayer( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + num_block=self.num_blocks_per_layer, + block_cfg=self.block_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + attention_cfg=None, + use_spp=False) for i in range(self.num_csplayer) + ] + + if self.drop_block_cfg: + layer.append(MODELS.build(self.drop_block_cfg)) + + return nn.Sequential(*layer) + + def build_out_layer(self, *args, **kwargs) -> nn.Module: + """build out layer.""" + return nn.Identity() diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov5_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov5_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..b95147fc512359442aeb1bbc88aadd07031bdadf --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov5_pafpn.py @@ -0,0 +1,171 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.models.backbones.csp_darknet import CSPLayer +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from ..utils import make_divisible, make_round +from .base_yolo_neck import BaseYOLONeck + + +@MODELS.register_module() +class YOLOv5PAFPN(BaseYOLONeck): + """Path Aggregation Network used in YOLOv5. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: List[int], + out_channels: Union[List[int], int], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 1, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + self.num_csp_blocks = num_csp_blocks + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def init_weights(self): + if self.init_cfg is None: + """Initialize the parameters.""" + for m in self.modules(): + if isinstance(m, torch.nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + else: + super().init_weights() + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. + """ + if idx == len(self.in_channels) - 1: + layer = ConvModule( + make_divisible(self.in_channels[idx], self.widen_factor), + make_divisible(self.in_channels[idx - 1], self.widen_factor), + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + layer = nn.Identity() + + return layer + + def build_upsample_layer(self, *args, **kwargs) -> nn.Module: + """build upsample layer.""" + return nn.Upsample(scale_factor=2, mode='nearest') + + def build_top_down_layer(self, idx: int): + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. + """ + + if idx == 1: + return CSPLayer( + make_divisible(self.in_channels[idx - 1] * 2, + self.widen_factor), + make_divisible(self.in_channels[idx - 1], self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + return nn.Sequential( + CSPLayer( + make_divisible(self.in_channels[idx - 1] * 2, + self.widen_factor), + make_divisible(self.in_channels[idx - 1], + self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, + self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + make_divisible(self.in_channels[idx - 1], + self.widen_factor), + make_divisible(self.in_channels[idx - 2], + self.widen_factor), + kernel_size=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The downsample layer. + """ + return ConvModule( + make_divisible(self.in_channels[idx], self.widen_factor), + make_divisible(self.in_channels[idx], self.widen_factor), + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. 
+ """ + return CSPLayer( + make_divisible(self.in_channels[idx] * 2, self.widen_factor), + make_divisible(self.in_channels[idx + 1], self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_out_layer(self, *args, **kwargs) -> nn.Module: + """build out layer.""" + return nn.Identity() diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov6_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov6_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..87782712352e269f159cc56da6ba6715840c87c7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov6_pafpn.py @@ -0,0 +1,527 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from ..layers import BepC3StageBlock, BiFusion, RepStageBlock +from ..utils import make_round +from .base_yolo_neck import BaseYOLONeck + + +@MODELS.register_module() +class YOLOv6RepPAFPN(BaseYOLONeck): + """Path Aggregation Network used in YOLOv6. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: List[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 12, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + block_cfg: ConfigType = dict(type='RepVGGBlock'), + init_cfg: OptMultiConfig = None): + self.num_csp_blocks = num_csp_blocks + self.block_cfg = block_cfg + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The reduce layer. + """ + if idx == 2: + layer = ConvModule( + in_channels=int(self.in_channels[idx] * self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * + self.widen_factor), + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + layer = nn.Identity() + + return layer + + def build_upsample_layer(self, idx: int) -> nn.Module: + """build upsample layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The upsample layer. 
+ """ + return nn.ConvTranspose2d( + in_channels=int(self.out_channels[idx - 1] * self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + kernel_size=2, + stride=2, + bias=True) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The top down layer. + """ + block_cfg = self.block_cfg.copy() + + layer0 = RepStageBlock( + in_channels=int( + (self.out_channels[idx - 1] + self.in_channels[idx - 1]) * + self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg) + + if idx == 1: + return layer0 + elif idx == 2: + layer1 = ConvModule( + in_channels=int(self.out_channels[idx - 1] * + self.widen_factor), + out_channels=int(self.out_channels[idx - 2] * + self.widen_factor), + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return nn.Sequential(layer0, layer1) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The downsample layer. + """ + return ConvModule( + in_channels=int(self.out_channels[idx] * self.widen_factor), + out_channels=int(self.out_channels[idx] * self.widen_factor), + kernel_size=3, + stride=2, + padding=3 // 2, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The bottom up layer. + """ + block_cfg = self.block_cfg.copy() + + return RepStageBlock( + in_channels=int(self.out_channels[idx] * 2 * self.widen_factor), + out_channels=int(self.out_channels[idx + 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg) + + def build_out_layer(self, *args, **kwargs) -> nn.Module: + """build out layer.""" + return nn.Identity() + + def init_weights(self): + if self.init_cfg is None: + """Initialize the parameters.""" + for m in self.modules(): + if isinstance(m, torch.nn.Conv2d): + # In order to be consistent with the source code, + # reset the Conv2d initialization parameters + m.reset_parameters() + else: + super().init_weights() + + +@MODELS.register_module() +class YOLOv6CSPRepPAFPN(YOLOv6RepPAFPN): + """Path Aggregation Network used in YOLOv6. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + block_act_cfg (dict): Config dict for activation layer used in each + stage. Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: List[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + hidden_ratio: float = 0.5, + num_csp_blocks: int = 12, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + block_act_cfg: ConfigType = dict(type='SiLU', inplace=True), + block_cfg: ConfigType = dict(type='RepVGGBlock'), + init_cfg: OptMultiConfig = None): + self.hidden_ratio = hidden_ratio + self.block_act_cfg = block_act_cfg + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + block_cfg=block_cfg, + init_cfg=init_cfg) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The top down layer. + """ + block_cfg = self.block_cfg.copy() + + layer0 = BepC3StageBlock( + in_channels=int( + (self.out_channels[idx - 1] + self.in_channels[idx - 1]) * + self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg, + hidden_ratio=self.hidden_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.block_act_cfg) + + if idx == 1: + return layer0 + elif idx == 2: + layer1 = ConvModule( + in_channels=int(self.out_channels[idx - 1] * + self.widen_factor), + out_channels=int(self.out_channels[idx - 2] * + self.widen_factor), + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return nn.Sequential(layer0, layer1) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The bottom up layer. + """ + block_cfg = self.block_cfg.copy() + + return BepC3StageBlock( + in_channels=int(self.out_channels[idx] * 2 * self.widen_factor), + out_channels=int(self.out_channels[idx + 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg, + hidden_ratio=self.hidden_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.block_act_cfg) + + +@MODELS.register_module() +class YOLOv6RepBiPAFPN(YOLOv6RepPAFPN): + """Path Aggregation Network used in YOLOv6 3.0. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: List[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 12, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + block_cfg: ConfigType = dict(type='RepVGGBlock'), + init_cfg: OptMultiConfig = None): + self.extra_in_channel = in_channels[0] + super().__init__( + in_channels=in_channels[1:], + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + block_cfg=block_cfg, + init_cfg=init_cfg) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The top down layer. + """ + block_cfg = self.block_cfg.copy() + + layer0 = RepStageBlock( + in_channels=int(self.out_channels[idx - 1] * self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg) + + if idx == 1: + return layer0 + elif idx == 2: + layer1 = ConvModule( + in_channels=int(self.out_channels[idx - 1] * + self.widen_factor), + out_channels=int(self.out_channels[idx - 2] * + self.widen_factor), + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return nn.Sequential(layer0, layer1) + + def build_upsample_layer(self, idx: int) -> nn.Module: + """build upsample layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The upsample layer. + """ + in_channels1 = self.in_channels[ + idx - 2] if idx > 1 else self.extra_in_channel + return BiFusion( + in_channels0=int(self.in_channels[idx - 1] * self.widen_factor), + in_channels1=int(in_channels1 * self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs: List[torch.Tensor]) -> tuple: + """Forward function.""" + assert len(inputs) == len(self.in_channels) + 1 + # reduce layers + reduce_outs = [inputs[0]] + for idx in range(len(self.in_channels)): + reduce_outs.append(self.reduce_layers[idx](inputs[idx + 1])) + + # top-down path + inner_outs = [reduce_outs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_cur = reduce_outs[idx] + feat_low = reduce_outs[idx - 1] + top_down_layer_inputs = self.upsample_layers[len(self.in_channels) + - 1 - idx]([ + feat_high, + feat_cur, feat_low + ]) + inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx]( + top_down_layer_inputs) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsample_layers[idx](feat_low) + out = self.bottom_up_layers[idx]( + torch.cat([downsample_feat, feat_high], 1)) + outs.append(out) + + # out_layers + results = [] + for idx in range(len(self.in_channels)): + results.append(self.out_layers[idx](outs[idx])) + + return tuple(results) + + +@MODELS.register_module() +class YOLOv6CSPRepBiPAFPN(YOLOv6RepBiPAFPN): + """Path Aggregation Network used in YOLOv6 3.0. + + Args: + in_channels (List[int]): Number of input channels per scale. 
+ out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='ReLU', inplace=True). + block_cfg (dict): Config dict for the block used to build each + layer. Defaults to dict(type='RepVGGBlock'). + block_act_cfg (dict): Config dict for activation layer used in each + stage. Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: List[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + hidden_ratio: float = 0.5, + num_csp_blocks: int = 12, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='ReLU', inplace=True), + block_act_cfg: ConfigType = dict(type='SiLU', inplace=True), + block_cfg: ConfigType = dict(type='RepVGGBlock'), + init_cfg: OptMultiConfig = None): + self.hidden_ratio = hidden_ratio + self.block_act_cfg = block_act_cfg + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + block_cfg=block_cfg, + init_cfg=init_cfg) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The top down layer. + """ + block_cfg = self.block_cfg.copy() + + layer0 = BepC3StageBlock( + in_channels=int(self.out_channels[idx - 1] * self.widen_factor), + out_channels=int(self.out_channels[idx - 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg, + hidden_ratio=self.hidden_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.block_act_cfg) + + if idx == 1: + return layer0 + elif idx == 2: + layer1 = ConvModule( + in_channels=int(self.out_channels[idx - 1] * + self.widen_factor), + out_channels=int(self.out_channels[idx - 2] * + self.widen_factor), + kernel_size=1, + stride=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return nn.Sequential(layer0, layer1) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + Returns: + nn.Module: The bottom up layer. 
+ """ + block_cfg = self.block_cfg.copy() + + return BepC3StageBlock( + in_channels=int(self.out_channels[idx] * 2 * self.widen_factor), + out_channels=int(self.out_channels[idx + 1] * self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + block_cfg=block_cfg, + hidden_ratio=self.hidden_ratio, + norm_cfg=self.norm_cfg, + act_cfg=self.block_act_cfg) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov7_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov7_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..1d31f4623b50083ff820e6b20229b33ad0f41860 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov7_pafpn.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from ..layers import MaxPoolAndStrideConvBlock, RepVGGBlock, SPPFCSPBlock +from .base_yolo_neck import BaseYOLONeck + + +@MODELS.register_module() +class YOLOv7PAFPN(BaseYOLONeck): + """Path Aggregation Network used in YOLOv7. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + block_cfg (dict): Config dict for block. + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + spp_expand_ratio (float): Expand ratio of SPPCSPBlock. + Defaults to 0.5. + is_tiny_version (bool): Is tiny version of neck. If True, + it means it is a yolov7 tiny model. Defaults to False. + use_maxpool_in_downsample (bool): Whether maxpooling is + used in downsample layers. Defaults to True. + use_in_channels_in_downsample (bool): MaxPoolAndStrideConvBlock + module input parameters. Defaults to False. + use_repconv_outs (bool): Whether to use `repconv` in the output + layer. Defaults to True. + upsample_feats_cat_first (bool): Whether the output features are + concat first after upsampling in the topdown module. + Defaults to True. Currently only YOLOv7 is false. + freeze_all(bool): Whether to freeze the model. Defaults to False. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: List[int], + out_channels: List[int], + block_cfg: dict = dict( + type='ELANBlock', + middle_ratio=0.5, + block_ratio=0.25, + num_blocks=4, + num_convs_in_block=1), + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + spp_expand_ratio: float = 0.5, + is_tiny_version: bool = False, + use_maxpool_in_downsample: bool = True, + use_in_channels_in_downsample: bool = False, + use_repconv_outs: bool = True, + upsample_feats_cat_first: bool = False, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + + self.is_tiny_version = is_tiny_version + self.use_maxpool_in_downsample = use_maxpool_in_downsample + self.use_in_channels_in_downsample = use_in_channels_in_downsample + self.spp_expand_ratio = spp_expand_ratio + self.use_repconv_outs = use_repconv_outs + self.block_cfg = block_cfg + self.block_cfg.setdefault('norm_cfg', norm_cfg) + self.block_cfg.setdefault('act_cfg', act_cfg) + + super().__init__( + in_channels=[ + int(channel * widen_factor) for channel in in_channels + ], + out_channels=[ + int(channel * widen_factor) for channel in out_channels + ], + deepen_factor=deepen_factor, + widen_factor=widen_factor, + upsample_feats_cat_first=upsample_feats_cat_first, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. + """ + if idx == len(self.in_channels) - 1: + layer = SPPFCSPBlock( + self.in_channels[idx], + self.out_channels[idx], + expand_ratio=self.spp_expand_ratio, + is_tiny_version=self.is_tiny_version, + kernel_sizes=5, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + layer = ConvModule( + self.in_channels[idx], + self.out_channels[idx], + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + return layer + + def build_upsample_layer(self, idx: int) -> nn.Module: + """build upsample layer.""" + return nn.Sequential( + ConvModule( + self.out_channels[idx], + self.out_channels[idx - 1], + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Upsample(scale_factor=2, mode='nearest')) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. + """ + block_cfg = self.block_cfg.copy() + block_cfg['in_channels'] = self.out_channels[idx - 1] * 2 + block_cfg['out_channels'] = self.out_channels[idx - 1] + return MODELS.build(block_cfg) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The downsample layer. + """ + if self.use_maxpool_in_downsample and not self.is_tiny_version: + return MaxPoolAndStrideConvBlock( + self.out_channels[idx], + self.out_channels[idx + 1], + use_in_channels_of_middle=self.use_in_channels_in_downsample, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + return ConvModule( + self.out_channels[idx], + self.out_channels[idx + 1], + 3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. 
+ """ + block_cfg = self.block_cfg.copy() + block_cfg['in_channels'] = self.out_channels[idx + 1] * 2 + block_cfg['out_channels'] = self.out_channels[idx + 1] + return MODELS.build(block_cfg) + + def build_out_layer(self, idx: int) -> nn.Module: + """build out layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The out layer. + """ + if len(self.in_channels) == 4: + # P6 + return nn.Identity() + + out_channels = self.out_channels[idx] * 2 + + if self.use_repconv_outs: + return RepVGGBlock( + self.out_channels[idx], + out_channels, + 3, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + return ConvModule( + self.out_channels[idx], + out_channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov8_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov8_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..e26698bcc191b0141d89c1e965de811494a96539 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolov8_pafpn.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Union + +import torch.nn as nn +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from .. import CSPLayerWithTwoConv +from ..utils import make_divisible, make_round +from .yolov5_pafpn import YOLOv5PAFPN + + +@MODELS.register_module() +class YOLOv8PAFPN(YOLOv5PAFPN): + """Path Aggregation Network used in YOLOv8. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + freeze_all(bool): Whether to freeze the model + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: List[int], + out_channels: Union[List[int], int], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 3, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. + """ + return nn.Identity() + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. 
+ """ + return CSPLayerWithTwoConv( + make_divisible((self.in_channels[idx - 1] + self.in_channels[idx]), + self.widen_factor), + make_divisible(self.out_channels[idx - 1], self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. + """ + return CSPLayerWithTwoConv( + make_divisible( + (self.out_channels[idx] + self.out_channels[idx + 1]), + self.widen_factor), + make_divisible(self.out_channels[idx + 1], self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolox_pafpn.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolox_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2595e70fe47e38e68ebd0d878deb6f264bf2d1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/necks/yolox_pafpn.py @@ -0,0 +1,172 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmdet.models.backbones.csp_darknet import CSPLayer +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from .base_yolo_neck import BaseYOLONeck + + +@MODELS.register_module() +class YOLOXPAFPN(BaseYOLONeck): + """Path Aggregation Network used in YOLOX. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale). + deepen_factor (float): Depth multiplier, multiply number of + blocks in CSP layer by this amount. Defaults to 1.0. + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Defaults to 1.0. + num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + freeze_all(bool): Whether to freeze the model. Defaults to False. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN', momentum=0.03, eps=0.001). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='SiLU', inplace=True). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: List[int], + out_channels: int, + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 3, + use_depthwise: bool = False, + freeze_all: bool = False, + norm_cfg: ConfigType = dict( + type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None): + self.num_csp_blocks = round(num_csp_blocks * deepen_factor) + self.use_depthwise = use_depthwise + + super().__init__( + in_channels=[ + int(channel * widen_factor) for channel in in_channels + ], + out_channels=int(out_channels * widen_factor), + deepen_factor=deepen_factor, + widen_factor=widen_factor, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_reduce_layer(self, idx: int) -> nn.Module: + """build reduce layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The reduce layer. 
+ """ + if idx == 2: + layer = ConvModule( + self.in_channels[idx], + self.in_channels[idx - 1], + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + layer = nn.Identity() + + return layer + + def build_upsample_layer(self, *args, **kwargs) -> nn.Module: + """build upsample layer.""" + return nn.Upsample(scale_factor=2, mode='nearest') + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. + """ + if idx == 1: + return CSPLayer( + self.in_channels[idx - 1] * 2, + self.in_channels[idx - 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + elif idx == 2: + return nn.Sequential( + CSPLayer( + self.in_channels[idx - 1] * 2, + self.in_channels[idx - 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + self.in_channels[idx - 1], + self.in_channels[idx - 2], + kernel_size=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def build_downsample_layer(self, idx: int) -> nn.Module: + """build downsample layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The downsample layer. + """ + conv = DepthwiseSeparableConvModule \ + if self.use_depthwise else ConvModule + return conv( + self.in_channels[idx], + self.in_channels[idx], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. + """ + return CSPLayer( + self.in_channels[idx] * 2, + self.in_channels[idx + 1], + num_blocks=self.num_csp_blocks, + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def build_out_layer(self, idx: int) -> nn.Module: + """build out layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The out layer. + """ + return ConvModule( + self.in_channels[idx], + self.out_channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..497233ac21a4dd1a6a2a3127c09435d8146eb553 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .cbam import CBAM + +__all__ = ['CBAM'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/cbam.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/cbam.py new file mode 100644 index 0000000000000000000000000000000000000000..e9559f2e2db951a5681ec9af5864928ed480361b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/plugins/cbam.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmdet.utils import OptMultiConfig +from mmengine.model import BaseModule + +from mmyolo.registry import MODELS + + +class ChannelAttention(BaseModule): + """ChannelAttention. + + Args: + channels (int): The input (and output) channels of the + ChannelAttention. + reduce_ratio (int): Squeeze ratio in ChannelAttention, the intermediate + channel will be ``int(channels/ratio)``. Defaults to 16. 
+ act_cfg (dict): Config dict for activation layer + Defaults to dict(type='ReLU'). + """ + + def __init__(self, + channels: int, + reduce_ratio: int = 16, + act_cfg: dict = dict(type='ReLU')): + super().__init__() + + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.max_pool = nn.AdaptiveMaxPool2d(1) + + self.fc = nn.Sequential( + ConvModule( + in_channels=channels, + out_channels=int(channels / reduce_ratio), + kernel_size=1, + stride=1, + conv_cfg=None, + act_cfg=act_cfg), + ConvModule( + in_channels=int(channels / reduce_ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=None, + act_cfg=None)) + self.sigmoid = nn.Sigmoid() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + avgpool_out = self.fc(self.avg_pool(x)) + maxpool_out = self.fc(self.max_pool(x)) + out = self.sigmoid(avgpool_out + maxpool_out) + return out + + +class SpatialAttention(BaseModule): + """SpatialAttention + Args: + kernel_size (int): The size of the convolution kernel in + SpatialAttention. Defaults to 7. + """ + + def __init__(self, kernel_size: int = 7): + super().__init__() + + self.conv = ConvModule( + in_channels=2, + out_channels=1, + kernel_size=kernel_size, + stride=1, + padding=kernel_size // 2, + conv_cfg=None, + act_cfg=dict(type='Sigmoid')) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + avg_out = torch.mean(x, dim=1, keepdim=True) + max_out, _ = torch.max(x, dim=1, keepdim=True) + out = torch.cat([avg_out, max_out], dim=1) + out = self.conv(out) + return out + + +@MODELS.register_module() +class CBAM(BaseModule): + """Convolutional Block Attention Module. arxiv link: + https://arxiv.org/abs/1807.06521v2. + + Args: + in_channels (int): The input (and output) channels of the CBAM. + reduce_ratio (int): Squeeze ratio in ChannelAttention, the intermediate + channel will be ``int(channels/ratio)``. Defaults to 16. + kernel_size (int): The size of the convolution kernel in + SpatialAttention. Defaults to 7. + act_cfg (dict): Config dict for activation layer in ChannelAttention + Defaults to dict(type='ReLU'). + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + reduce_ratio: int = 16, + kernel_size: int = 7, + act_cfg: dict = dict(type='ReLU'), + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + self.channel_attention = ChannelAttention( + channels=in_channels, reduce_ratio=reduce_ratio, act_cfg=act_cfg) + + self.spatial_attention = SpatialAttention(kernel_size) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + out = self.channel_attention(x) * x + out = self.spatial_attention(out) * out + return out diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7dbdc25fa3cf16e85e0e99e7d302a98f2b4f13ce --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
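The `CBAM` module defined above reduces to two multiplicative gates: a channel gate driven by pooled descriptors and a spatial gate driven by channel-wise statistics. The self-contained PyTorch sketch below mirrors that math without the mmcv/mmengine wrappers; `TinyCBAM` and all names in it are hypothetical, and the snippet is an illustration rather than the registered class.

```
import torch
import torch.nn as nn


class TinyCBAM(nn.Module):
    """Minimal CBAM-style attention: channel gate followed by spatial gate."""

    def __init__(self, channels: int, reduce_ratio: int = 16,
                 kernel_size: int = 7):
        super().__init__()
        hidden = max(channels // reduce_ratio, 1)
        # Shared MLP (1x1 convs) applied to both avg- and max-pooled descriptors.
        self.mlp = nn.Sequential(
            nn.Conv2d(channels, hidden, 1), nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, 1))
        # Conv over the concatenated channel-wise mean/max maps.
        self.spatial = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Channel attention: sigmoid(MLP(avgpool(x)) + MLP(maxpool(x))).
        avg = self.mlp(x.mean(dim=(2, 3), keepdim=True))
        mx = self.mlp(x.amax(dim=(2, 3), keepdim=True))
        x = x * torch.sigmoid(avg + mx)
        # Spatial attention: sigmoid(conv([mean over channels; max over channels])).
        stacked = torch.cat(
            [x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1)
        return x * torch.sigmoid(self.spatial(stacked))


if __name__ == '__main__':
    feat = torch.randn(2, 64, 32, 32)
    print(TinyCBAM(64)(feat).shape)  # torch.Size([2, 64, 32, 32])
```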
+from .assigners import BatchATSSAssigner, BatchTaskAlignedAssigner +from .coders import YOLOv5BBoxCoder, YOLOXBBoxCoder + +__all__ = [ + 'YOLOv5BBoxCoder', 'YOLOXBBoxCoder', 'BatchATSSAssigner', + 'BatchTaskAlignedAssigner' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b2e2e69c921367083e21abce799e3ef5b8d47e1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .batch_atss_assigner import BatchATSSAssigner +from .batch_dsl_assigner import BatchDynamicSoftLabelAssigner +from .batch_task_aligned_assigner import BatchTaskAlignedAssigner +from .pose_sim_ota_assigner import PoseSimOTAAssigner +from .utils import (select_candidates_in_gts, select_highest_overlaps, + yolov6_iou_calculator) + +__all__ = [ + 'BatchATSSAssigner', 'BatchTaskAlignedAssigner', + 'select_candidates_in_gts', 'select_highest_overlaps', + 'yolov6_iou_calculator', 'BatchDynamicSoftLabelAssigner', + 'PoseSimOTAAssigner' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_atss_assigner.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_atss_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..45b3069afde73e240890273c58e3860da59ad854 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_atss_assigner.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.utils import ConfigType +from torch import Tensor + +from mmyolo.registry import TASK_UTILS +from .utils import (select_candidates_in_gts, select_highest_overlaps, + yolov6_iou_calculator) + + +def bbox_center_distance(bboxes: Tensor, + priors: Tensor) -> Tuple[Tensor, Tensor]: + """Compute the center distance between bboxes and priors. + + Args: + bboxes (Tensor): Shape (n, 4) for bbox, "xyxy" format. + priors (Tensor): Shape (num_priors, 4) for priors, "xyxy" format. + + Returns: + distances (Tensor): Center distances between bboxes and priors, + shape (num_priors, n). + priors_points (Tensor): Priors cx cy points, + shape (num_priors, 2). + """ + bbox_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 + bbox_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 + bbox_points = torch.stack((bbox_cx, bbox_cy), dim=1) + + priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 + priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 + priors_points = torch.stack((priors_cx, priors_cy), dim=1) + + distances = (bbox_points[:, None, :] - + priors_points[None, :, :]).pow(2).sum(-1).sqrt() + + return distances, priors_points + + +@TASK_UTILS.register_module() +class BatchATSSAssigner(nn.Module): + """Assign a batch of corresponding gt bboxes or background to each prior. + + This code is based on + https://github.com/meituan/YOLOv6/blob/main/yolov6/assigners/atss_assigner.py + + Each proposal will be assigned with `0` or a positive integer + indicating the ground truth index. + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + num_classes (int): number of class + iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou + calculator. 
Defaults to ``dict(type='BboxOverlaps2D')`` + topk (int): number of priors selected in each level + """ + + def __init__( + self, + num_classes: int, + iou_calculator: ConfigType = dict(type='mmdet.BboxOverlaps2D'), + topk: int = 9): + super().__init__() + self.num_classes = num_classes + self.iou_calculator = TASK_UTILS.build(iou_calculator) + self.topk = topk + + @torch.no_grad() + def forward(self, pred_bboxes: Tensor, priors: Tensor, + num_level_priors: List, gt_labels: Tensor, gt_bboxes: Tensor, + pad_bbox_flag: Tensor) -> dict: + """Assign gt to priors. + + The assignment is done in following steps + + 1. compute iou between all prior (prior of all pyramid levels) and gt + 2. compute center distance between all prior and gt + 3. on each pyramid level, for each gt, select k prior whose center + are closest to the gt center, so we total select k*l prior as + candidates for each gt + 4. get corresponding iou for the these candidates, and compute the + mean and std, set mean + std as the iou threshold + 5. select these candidates whose iou are greater than or equal to + the threshold as positive + 6. limit the positive sample's center in gt + + Args: + pred_bboxes (Tensor): Predicted bounding boxes, + shape(batch_size, num_priors, 4) + priors (Tensor): Model priors with stride, shape(num_priors, 4) + num_level_priors (List): Number of bboxes in each level, len(3) + gt_labels (Tensor): Ground truth label, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground truth bbox, + shape(batch_size, num_gt, 4) + pad_bbox_flag (Tensor): Ground truth bbox mask, + 1 means bbox, 0 means no bbox, + shape(batch_size, num_gt, 1) + Returns: + assigned_result (dict): Assigned result + 'assigned_labels' (Tensor): shape(batch_size, num_gt) + 'assigned_bboxes' (Tensor): shape(batch_size, num_gt, 4) + 'assigned_scores' (Tensor): + shape(batch_size, num_gt, number_classes) + 'fg_mask_pre_prior' (Tensor): shape(bs, num_gt) + """ + # generate priors + cell_half_size = priors[:, 2:] * 2.5 + priors_gen = torch.zeros_like(priors) + priors_gen[:, :2] = priors[:, :2] - cell_half_size + priors_gen[:, 2:] = priors[:, :2] + cell_half_size + priors = priors_gen + + batch_size = gt_bboxes.size(0) + num_gt, num_priors = gt_bboxes.size(1), priors.size(0) + + assigned_result = { + 'assigned_labels': + gt_bboxes.new_full([batch_size, num_priors], self.num_classes), + 'assigned_bboxes': + gt_bboxes.new_full([batch_size, num_priors, 4], 0), + 'assigned_scores': + gt_bboxes.new_full([batch_size, num_priors, self.num_classes], 0), + 'fg_mask_pre_prior': + gt_bboxes.new_full([batch_size, num_priors], 0) + } + + if num_gt == 0: + return assigned_result + + # compute iou between all prior (prior of all pyramid levels) and gt + overlaps = self.iou_calculator(gt_bboxes.reshape([-1, 4]), priors) + overlaps = overlaps.reshape([batch_size, -1, num_priors]) + + # compute center distance between all prior and gt + distances, priors_points = bbox_center_distance( + gt_bboxes.reshape([-1, 4]), priors) + distances = distances.reshape([batch_size, -1, num_priors]) + + # Selecting candidates based on the center distance + is_in_candidate, candidate_idxs = self.select_topk_candidates( + distances, num_level_priors, pad_bbox_flag) + + # get corresponding iou for the these candidates, and compute the + # mean and std, set mean + std as the iou threshold + overlaps_thr_per_gt, iou_candidates = self.threshold_calculator( + is_in_candidate, candidate_idxs, overlaps, num_priors, batch_size, + num_gt) + + # select candidates iou >= threshold as 
positive + is_pos = torch.where( + iou_candidates > overlaps_thr_per_gt.repeat([1, 1, num_priors]), + is_in_candidate, torch.zeros_like(is_in_candidate)) + + is_in_gts = select_candidates_in_gts(priors_points, gt_bboxes) + pos_mask = is_pos * is_in_gts * pad_bbox_flag + + # if an anchor box is assigned to multiple gts, + # the one with the highest IoU will be selected. + gt_idx_pre_prior, fg_mask_pre_prior, pos_mask = \ + select_highest_overlaps(pos_mask, overlaps, num_gt) + + # assigned target + assigned_labels, assigned_bboxes, assigned_scores = self.get_targets( + gt_labels, gt_bboxes, gt_idx_pre_prior, fg_mask_pre_prior, + num_priors, batch_size, num_gt) + + # soft label with iou + if pred_bboxes is not None: + ious = yolov6_iou_calculator(gt_bboxes, pred_bboxes) * pos_mask + ious = ious.max(axis=-2)[0].unsqueeze(-1) + assigned_scores *= ious + + assigned_result['assigned_labels'] = assigned_labels.long() + assigned_result['assigned_bboxes'] = assigned_bboxes + assigned_result['assigned_scores'] = assigned_scores + assigned_result['fg_mask_pre_prior'] = fg_mask_pre_prior.bool() + return assigned_result + + def select_topk_candidates(self, distances: Tensor, + num_level_priors: List[int], + pad_bbox_flag: Tensor) -> Tuple[Tensor, Tensor]: + """Selecting candidates based on the center distance. + + Args: + distances (Tensor): Distance between all bbox and gt, + shape(batch_size, num_gt, num_priors) + num_level_priors (List[int]): Number of bboxes in each level, + len(3) + pad_bbox_flag (Tensor): Ground truth bbox mask, + shape(batch_size, num_gt, 1) + + Return: + is_in_candidate_list (Tensor): Flag show that each level have + topk candidates or not, shape(batch_size, num_gt, num_priors) + candidate_idxs (Tensor): Candidates index, + shape(batch_size, num_gt, num_gt) + """ + is_in_candidate_list = [] + candidate_idxs = [] + start_idx = 0 + + distances_dtype = distances.dtype + distances = torch.split(distances, num_level_priors, dim=-1) + pad_bbox_flag = pad_bbox_flag.repeat(1, 1, self.topk).bool() + + for distances_per_level, priors_per_level in zip( + distances, num_level_priors): + # on each pyramid level, for each gt, + # select k bbox whose center are closest to the gt center + end_index = start_idx + priors_per_level + selected_k = min(self.topk, priors_per_level) + + _, topk_idxs_per_level = distances_per_level.topk( + selected_k, dim=-1, largest=False) + candidate_idxs.append(topk_idxs_per_level + start_idx) + + topk_idxs_per_level = torch.where( + pad_bbox_flag, topk_idxs_per_level, + torch.zeros_like(topk_idxs_per_level)) + + is_in_candidate = F.one_hot(topk_idxs_per_level, + priors_per_level).sum(dim=-2) + is_in_candidate = torch.where(is_in_candidate > 1, + torch.zeros_like(is_in_candidate), + is_in_candidate) + is_in_candidate_list.append(is_in_candidate.to(distances_dtype)) + + start_idx = end_index + + is_in_candidate_list = torch.cat(is_in_candidate_list, dim=-1) + candidate_idxs = torch.cat(candidate_idxs, dim=-1) + + return is_in_candidate_list, candidate_idxs + + @staticmethod + def threshold_calculator(is_in_candidate: List, candidate_idxs: Tensor, + overlaps: Tensor, num_priors: int, + batch_size: int, + num_gt: int) -> Tuple[Tensor, Tensor]: + """Get corresponding iou for the these candidates, and compute the mean + and std, set mean + std as the iou threshold. + + Args: + is_in_candidate (Tensor): Flag show that each level have + topk candidates or not, shape(batch_size, num_gt, num_priors). 
+ candidate_idxs (Tensor): Candidates index, + shape(batch_size, num_gt, num_gt) + overlaps (Tensor): Overlaps area, + shape(batch_size, num_gt, num_priors). + num_priors (int): Number of priors. + batch_size (int): Batch size. + num_gt (int): Number of ground truth. + + Return: + overlaps_thr_per_gt (Tensor): Overlap threshold of + per ground truth, shape(batch_size, num_gt, 1). + candidate_overlaps (Tensor): Candidate overlaps, + shape(batch_size, num_gt, num_priors). + """ + + batch_size_num_gt = batch_size * num_gt + candidate_overlaps = torch.where(is_in_candidate > 0, overlaps, + torch.zeros_like(overlaps)) + candidate_idxs = candidate_idxs.reshape([batch_size_num_gt, -1]) + + assist_indexes = num_priors * torch.arange( + batch_size_num_gt, device=candidate_idxs.device) + assist_indexes = assist_indexes[:, None] + flatten_indexes = candidate_idxs + assist_indexes + + candidate_overlaps_reshape = candidate_overlaps.reshape( + -1)[flatten_indexes] + candidate_overlaps_reshape = candidate_overlaps_reshape.reshape( + [batch_size, num_gt, -1]) + + overlaps_mean_per_gt = candidate_overlaps_reshape.mean( + axis=-1, keepdim=True) + overlaps_std_per_gt = candidate_overlaps_reshape.std( + axis=-1, keepdim=True) + overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt + + return overlaps_thr_per_gt, candidate_overlaps + + def get_targets(self, gt_labels: Tensor, gt_bboxes: Tensor, + assigned_gt_inds: Tensor, fg_mask_pre_prior: Tensor, + num_priors: int, batch_size: int, + num_gt: int) -> Tuple[Tensor, Tensor, Tensor]: + """Get target info. + + Args: + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + assigned_gt_inds (Tensor): Assigned ground truth indexes, + shape(batch_size, num_priors) + fg_mask_pre_prior (Tensor): Force ground truth matching mask, + shape(batch_size, num_priors) + num_priors (int): Number of priors. + batch_size (int): Batch size. + num_gt (int): Number of ground truth. 
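The heart of `threshold_calculator` above is an adaptive rule: each ground truth accepts the candidates whose IoU clears the mean plus standard deviation of its own candidates' IoUs. A single-image sketch of that rule in plain PyTorch follows; the function and tensor names are hypothetical, and the full assigner additionally requires prior centers to fall inside the ground-truth box.

```
import torch


def atss_positives(overlaps: torch.Tensor,
                   is_candidate: torch.Tensor) -> torch.Tensor:
    """Pick ATSS positives for one image.

    Args:
        overlaps: IoU between gts and priors, shape (num_gt, num_priors).
        is_candidate: bool mask of the per-level top-k closest priors,
            shape (num_gt, num_priors).

    Returns:
        Bool mask of positive priors per gt, shape (num_gt, num_priors).
    """
    candidate_ious = torch.where(is_candidate, overlaps,
                                 torch.zeros_like(overlaps))
    # Adaptive threshold per gt: mean + std of its candidates' IoUs
    # (population std for brevity; the assigner uses the unbiased sample std).
    k = is_candidate.sum(dim=1, keepdim=True).clamp(min=1)
    mean = candidate_ious.sum(dim=1, keepdim=True) / k
    var = ((candidate_ious - mean)**2 * is_candidate).sum(
        dim=1, keepdim=True) / k
    thr = mean + var.sqrt()
    # A prior is positive if it is a candidate and its IoU clears the threshold.
    return is_candidate & (overlaps >= thr)


if __name__ == '__main__':
    ious = torch.rand(3, 100)                 # 3 gts, 100 priors
    cand = torch.zeros(3, 100, dtype=torch.bool)
    cand[:, torch.randperm(100)[:9]] = True   # 9 candidates per gt
    print(atss_positives(ious, cand).sum(dim=1))
```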
+ + Return: + assigned_labels (Tensor): Assigned labels, + shape(batch_size, num_priors) + assigned_bboxes (Tensor): Assigned bboxes, + shape(batch_size, num_priors) + assigned_scores (Tensor): Assigned scores, + shape(batch_size, num_priors) + """ + + # assigned target labels + batch_index = torch.arange( + batch_size, dtype=gt_labels.dtype, device=gt_labels.device) + batch_index = batch_index[..., None] + assigned_gt_inds = (assigned_gt_inds + batch_index * num_gt).long() + assigned_labels = gt_labels.flatten()[assigned_gt_inds.flatten()] + assigned_labels = assigned_labels.reshape([batch_size, num_priors]) + assigned_labels = torch.where( + fg_mask_pre_prior > 0, assigned_labels, + torch.full_like(assigned_labels, self.num_classes)) + + # assigned target boxes + assigned_bboxes = gt_bboxes.reshape([-1, + 4])[assigned_gt_inds.flatten()] + assigned_bboxes = assigned_bboxes.reshape([batch_size, num_priors, 4]) + + # assigned target scores + assigned_scores = F.one_hot(assigned_labels.long(), + self.num_classes + 1).float() + assigned_scores = assigned_scores[:, :, :self.num_classes] + + return assigned_labels, assigned_bboxes, assigned_scores diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..5ae0f80239590f9c906778e6e4c7c6b4bd10c488 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py @@ -0,0 +1,272 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.structures.bbox import BaseBoxes +from mmdet.utils import ConfigType +from torch import Tensor + +from mmyolo.registry import TASK_UTILS + +INF = 100000000 +EPS = 1.0e-7 + + +def find_inside_points(boxes: Tensor, + points: Tensor, + box_dim: int = 4, + eps: float = 0.01) -> Tensor: + """Find inside box points in batches. Boxes dimension must be 3. + + Args: + boxes (Tensor): Boxes tensor. Must be batch input. + Has shape of (batch_size, n_boxes, box_dim). + points (Tensor): Points coordinates. Has shape of (n_points, 2). + box_dim (int): The dimension of box. 4 means horizontal box and + 5 means rotated box. Defaults to 4. + eps (float): Make sure the points are inside not on the boundary. + Only use in rotated boxes. Defaults to 0.01. + + Returns: + Tensor: A BoolTensor indicating whether a point is inside + boxes. The index has shape of (n_points, batch_size, n_boxes). 
+ """ + if box_dim == 4: + # Horizontal Boxes + lt_ = points[:, None, None] - boxes[..., :2] + rb_ = boxes[..., 2:] - points[:, None, None] + + deltas = torch.cat([lt_, rb_], dim=-1) + is_in_gts = deltas.min(dim=-1).values > 0 + + elif box_dim == 5: + # Rotated Boxes + points = points[:, None, None] + ctrs, wh, t = torch.split(boxes, [2, 2, 1], dim=-1) + cos_value, sin_value = torch.cos(t), torch.sin(t) + matrix = torch.cat([cos_value, sin_value, -sin_value, cos_value], + dim=-1).reshape(*boxes.shape[:-1], 2, 2) + + offset = points - ctrs + offset = torch.matmul(matrix, offset[..., None]) + offset = offset.squeeze(-1) + offset_x, offset_y = offset[..., 0], offset[..., 1] + w, h = wh[..., 0], wh[..., 1] + is_in_gts = (offset_x <= w / 2 - eps) & (offset_x >= - w / 2 + eps) & \ + (offset_y <= h / 2 - eps) & (offset_y >= - h / 2 + eps) + else: + raise NotImplementedError(f'Unsupport box_dim:{box_dim}') + + return is_in_gts + + +def get_box_center(boxes: Tensor, box_dim: int = 4) -> Tensor: + """Return a tensor representing the centers of boxes. + + Args: + boxes (Tensor): Boxes tensor. Has shape of (b, n, box_dim) + box_dim (int): The dimension of box. 4 means horizontal box and + 5 means rotated box. Defaults to 4. + + Returns: + Tensor: Centers have shape of (b, n, 2) + """ + if box_dim == 4: + # Horizontal Boxes, (x1, y1, x2, y2) + return (boxes[..., :2] + boxes[..., 2:]) / 2.0 + elif box_dim == 5: + # Rotated Boxes, (x, y, w, h, a) + return boxes[..., :2] + else: + raise NotImplementedError(f'Unsupported box_dim:{box_dim}') + + +@TASK_UTILS.register_module() +class BatchDynamicSoftLabelAssigner(nn.Module): + """Computes matching between predictions and ground truth with dynamic soft + label assignment. + + Args: + num_classes (int): number of class + soft_center_radius (float): Radius of the soft center prior. + Defaults to 3.0. + topk (int): Select top-k predictions to calculate dynamic k + best matches for each gt. Defaults to 13. + iou_weight (float): The scale factor of iou cost. Defaults to 3.0. + iou_calculator (ConfigType): Config of overlaps Calculator. + Defaults to dict(type='BboxOverlaps2D'). + batch_iou (bool): Use batch input when calculate IoU. + If set to False use loop instead. Defaults to True. 
+ """ + + def __init__( + self, + num_classes, + soft_center_radius: float = 3.0, + topk: int = 13, + iou_weight: float = 3.0, + iou_calculator: ConfigType = dict(type='mmdet.BboxOverlaps2D'), + batch_iou: bool = True, + ) -> None: + super().__init__() + self.num_classes = num_classes + self.soft_center_radius = soft_center_radius + self.topk = topk + self.iou_weight = iou_weight + self.iou_calculator = TASK_UTILS.build(iou_calculator) + self.batch_iou = batch_iou + + @torch.no_grad() + def forward(self, pred_bboxes: Tensor, pred_scores: Tensor, priors: Tensor, + gt_labels: Tensor, gt_bboxes: Tensor, + pad_bbox_flag: Tensor) -> dict: + num_gt = gt_bboxes.size(1) + decoded_bboxes = pred_bboxes + batch_size, num_bboxes, box_dim = decoded_bboxes.size() + + if num_gt == 0 or num_bboxes == 0: + return { + 'assigned_labels': + gt_labels.new_full( + pred_scores[..., 0].shape, + self.num_classes, + dtype=torch.long), + 'assigned_labels_weights': + gt_bboxes.new_full(pred_scores[..., 0].shape, 1), + 'assigned_bboxes': + gt_bboxes.new_full(pred_bboxes.shape, 0), + 'assign_metrics': + gt_bboxes.new_full(pred_scores[..., 0].shape, 0) + } + + prior_center = priors[:, :2] + if isinstance(gt_bboxes, BaseBoxes): + raise NotImplementedError( + f'type of {type(gt_bboxes)} are not implemented !') + else: + is_in_gts = find_inside_points(gt_bboxes, prior_center, box_dim) + + # (N_points, B, N_boxes) + is_in_gts = is_in_gts * pad_bbox_flag[..., 0][None] + # (N_points, B, N_boxes) -> (B, N_points, N_boxes) + is_in_gts = is_in_gts.permute(1, 0, 2) + # (B, N_points) + valid_mask = is_in_gts.sum(dim=-1) > 0 + + gt_center = get_box_center(gt_bboxes, box_dim) + + strides = priors[..., 2] + distance = (priors[None].unsqueeze(2)[..., :2] - + gt_center[:, None, :, :] + ).pow(2).sum(-1).sqrt() / strides[None, :, None] + + # prevent overflow + distance = distance * valid_mask.unsqueeze(-1) + soft_center_prior = torch.pow(10, distance - self.soft_center_radius) + + if self.batch_iou: + pairwise_ious = self.iou_calculator(decoded_bboxes, gt_bboxes) + else: + ious = [] + for box, gt in zip(decoded_bboxes, gt_bboxes): + iou = self.iou_calculator(box, gt) + ious.append(iou) + pairwise_ious = torch.stack(ious, dim=0) + + iou_cost = -torch.log(pairwise_ious + EPS) * self.iou_weight + + # select the predicted scores corresponded to the gt_labels + pairwise_pred_scores = pred_scores.permute(0, 2, 1) + idx = torch.zeros([2, batch_size, num_gt], dtype=torch.long) + idx[0] = torch.arange(end=batch_size).view(-1, 1).repeat(1, num_gt) + idx[1] = gt_labels.long().squeeze(-1) + pairwise_pred_scores = pairwise_pred_scores[idx[0], + idx[1]].permute(0, 2, 1) + # classification cost + scale_factor = pairwise_ious - pairwise_pred_scores.sigmoid() + pairwise_cls_cost = F.binary_cross_entropy_with_logits( + pairwise_pred_scores, pairwise_ious, + reduction='none') * scale_factor.abs().pow(2.0) + + cost_matrix = pairwise_cls_cost + iou_cost + soft_center_prior + + max_pad_value = torch.ones_like(cost_matrix) * INF + cost_matrix = torch.where(valid_mask[..., None].repeat(1, 1, num_gt), + cost_matrix, max_pad_value) + + (matched_pred_ious, matched_gt_inds, + fg_mask_inboxes) = self.dynamic_k_matching(cost_matrix, pairwise_ious, + pad_bbox_flag) + + del pairwise_ious, cost_matrix + + batch_index = (fg_mask_inboxes > 0).nonzero(as_tuple=True)[0] + + assigned_labels = gt_labels.new_full(pred_scores[..., 0].shape, + self.num_classes) + assigned_labels[fg_mask_inboxes] = gt_labels[ + batch_index, matched_gt_inds].squeeze(-1) + assigned_labels = 
assigned_labels.long() + + assigned_labels_weights = gt_bboxes.new_full(pred_scores[..., 0].shape, + 1) + + assigned_bboxes = gt_bboxes.new_full(pred_bboxes.shape, 0) + assigned_bboxes[fg_mask_inboxes] = gt_bboxes[batch_index, + matched_gt_inds] + + assign_metrics = gt_bboxes.new_full(pred_scores[..., 0].shape, 0) + assign_metrics[fg_mask_inboxes] = matched_pred_ious + + return dict( + assigned_labels=assigned_labels, + assigned_labels_weights=assigned_labels_weights, + assigned_bboxes=assigned_bboxes, + assign_metrics=assign_metrics) + + def dynamic_k_matching( + self, cost_matrix: Tensor, pairwise_ious: Tensor, + pad_bbox_flag: int) -> Tuple[Tensor, Tensor, Tensor]: + """Use IoU and matching cost to calculate the dynamic top-k positive + targets. + + Args: + cost_matrix (Tensor): Cost matrix. + pairwise_ious (Tensor): Pairwise iou matrix. + num_gt (int): Number of gt. + valid_mask (Tensor): Mask for valid bboxes. + Returns: + tuple: matched ious and gt indexes. + """ + matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8) + # select candidate topk ious for dynamic-k calculation + candidate_topk = min(self.topk, pairwise_ious.size(1)) + topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1) + # calculate dynamic k for each gt + dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1) + + num_gts = pad_bbox_flag.sum((1, 2)).int() + # sorting the batch cost matirx is faster than topk + _, sorted_indices = torch.sort(cost_matrix, dim=1) + for b in range(pad_bbox_flag.shape[0]): + for gt_idx in range(num_gts[b]): + topk_ids = sorted_indices[b, :dynamic_ks[b, gt_idx], gt_idx] + matching_matrix[b, :, gt_idx][topk_ids] = 1 + + del topk_ious, dynamic_ks + + prior_match_gt_mask = matching_matrix.sum(2) > 1 + if prior_match_gt_mask.sum() > 0: + cost_min, cost_argmin = torch.min( + cost_matrix[prior_match_gt_mask, :], dim=1) + matching_matrix[prior_match_gt_mask, :] *= 0 + matching_matrix[prior_match_gt_mask, cost_argmin] = 1 + + # get foreground mask inside box and center prior + fg_mask_inboxes = matching_matrix.sum(2) > 0 + matched_pred_ious = (matching_matrix * + pairwise_ious).sum(2)[fg_mask_inboxes] + matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) + return matched_pred_ious, matched_gt_inds, fg_mask_inboxes diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_task_aligned_assigner.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_task_aligned_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..202d678986c3a398de63675c004592b98ea092e0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_task_aligned_assigner.py @@ -0,0 +1,311 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from mmyolo.models.losses import bbox_overlaps +from mmyolo.registry import TASK_UTILS +from .utils import (select_candidates_in_gts, select_highest_overlaps, + yolov6_iou_calculator) + + +@TASK_UTILS.register_module() +class BatchTaskAlignedAssigner(nn.Module): + """This code referenced to + https://github.com/meituan/YOLOv6/blob/main/yolov6/ + assigners/tal_assigner.py. + Batch Task aligned assigner base on the paper: + `TOOD: Task-aligned One-stage Object Detection. + `_. + Assign a corresponding gt bboxes or background to a batch of + predicted bboxes. 
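The cost matrix assembled in `BatchDynamicSoftLabelAssigner.forward` above adds three terms per (prior, ground truth) pair: an exponential soft-center prior, a scaled `-log(IoU)` term, and a quality-focal-style classification cost. The sketch below recomputes that combination for one image so the weighting is easy to inspect; the weights and tensor names are illustrative only.

```
import torch
import torch.nn.functional as F


def soft_label_cost(cls_logits, ious, center_dist,
                    soft_center_radius=3.0, iou_weight=3.0, eps=1e-7):
    """Per-(prior, gt) matching cost for one image.

    Args:
        cls_logits: predicted logit of each prior for the class of each gt,
            shape (num_priors, num_gt).
        ious: IoU between decoded priors and gts, shape (num_priors, num_gt).
        center_dist: prior-to-gt center distance in units of the prior stride,
            shape (num_priors, num_gt).
    """
    # Soft center prior: grows exponentially as a prior drifts away from a gt.
    center_cost = torch.pow(10.0, center_dist - soft_center_radius)
    # IoU cost: low IoU -> large -log(IoU).
    iou_cost = -torch.log(ious + eps) * iou_weight
    # Soft classification cost: BCE against the IoU target, re-weighted by the
    # gap between predicted quality and actual IoU (quality-focal style).
    scale = (ious - cls_logits.sigmoid()).abs().pow(2.0)
    cls_cost = F.binary_cross_entropy_with_logits(
        cls_logits, ious, reduction='none') * scale
    return cls_cost + iou_cost + center_cost


if __name__ == '__main__':
    n_priors, n_gt = 400, 5
    cost = soft_label_cost(torch.randn(n_priors, n_gt),
                           torch.rand(n_priors, n_gt),
                           torch.rand(n_priors, n_gt) * 8)
    print(cost.shape)  # torch.Size([400, 5])
```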
Each bbox will be assigned with `0` or a + positive integer indicating the ground truth index. + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + Args: + num_classes (int): number of class + topk (int): number of bbox selected in each level + alpha (float): Hyper-parameters related to alignment_metrics. + Defaults to 1.0 + beta (float): Hyper-parameters related to alignment_metrics. + Defaults to 6. + eps (float): Eps to avoid log(0). Default set to 1e-9 + use_ciou (bool): Whether to use ciou while calculating iou. + Defaults to False. + """ + + def __init__(self, + num_classes: int, + topk: int = 13, + alpha: float = 1.0, + beta: float = 6.0, + eps: float = 1e-7, + use_ciou: bool = False): + super().__init__() + self.num_classes = num_classes + self.topk = topk + self.alpha = alpha + self.beta = beta + self.eps = eps + self.use_ciou = use_ciou + + @torch.no_grad() + def forward( + self, + pred_bboxes: Tensor, + pred_scores: Tensor, + priors: Tensor, + gt_labels: Tensor, + gt_bboxes: Tensor, + pad_bbox_flag: Tensor, + ) -> dict: + """Assign gt to bboxes. + + The assignment is done in following steps + 1. compute alignment metric between all bbox (bbox of all pyramid + levels) and gt + 2. select top-k bbox as candidates for each gt + 3. limit the positive sample's center in gt (because the anchor-free + detector only can predict positive distance) + Args: + pred_bboxes (Tensor): Predict bboxes, + shape(batch_size, num_priors, 4) + pred_scores (Tensor): Scores of predict bboxes, + shape(batch_size, num_priors, num_classes) + priors (Tensor): Model priors, shape (num_priors, 4) + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + pad_bbox_flag (Tensor): Ground truth bbox mask, + 1 means bbox, 0 means no bbox, + shape(batch_size, num_gt, 1) + Returns: + assigned_result (dict) Assigned result: + assigned_labels (Tensor): Assigned labels, + shape(batch_size, num_priors) + assigned_bboxes (Tensor): Assigned boxes, + shape(batch_size, num_priors, 4) + assigned_scores (Tensor): Assigned scores, + shape(batch_size, num_priors, num_classes) + fg_mask_pre_prior (Tensor): Force ground truth matching mask, + shape(batch_size, num_priors) + """ + # (num_priors, 4) -> (num_priors, 2) + priors = priors[:, :2] + + batch_size = pred_scores.size(0) + num_gt = gt_bboxes.size(1) + + assigned_result = { + 'assigned_labels': + gt_bboxes.new_full(pred_scores[..., 0].shape, self.num_classes), + 'assigned_bboxes': + gt_bboxes.new_full(pred_bboxes.shape, 0), + 'assigned_scores': + gt_bboxes.new_full(pred_scores.shape, 0), + 'fg_mask_pre_prior': + gt_bboxes.new_full(pred_scores[..., 0].shape, 0) + } + + if num_gt == 0: + return assigned_result + + pos_mask, alignment_metrics, overlaps = self.get_pos_mask( + pred_bboxes, pred_scores, priors, gt_labels, gt_bboxes, + pad_bbox_flag, batch_size, num_gt) + + (assigned_gt_idxs, fg_mask_pre_prior, + pos_mask) = select_highest_overlaps(pos_mask, overlaps, num_gt) + + # assigned target + assigned_labels, assigned_bboxes, assigned_scores = self.get_targets( + gt_labels, gt_bboxes, assigned_gt_idxs, fg_mask_pre_prior, + batch_size, num_gt) + + # normalize + alignment_metrics *= pos_mask + pos_align_metrics = alignment_metrics.max(axis=-1, keepdim=True)[0] + pos_overlaps = (overlaps * pos_mask).max(axis=-1, keepdim=True)[0] + norm_align_metric = ( + alignment_metrics * pos_overlaps / + (pos_align_metrics + 
self.eps)).max(-2)[0].unsqueeze(-1) + assigned_scores = assigned_scores * norm_align_metric + + assigned_result['assigned_labels'] = assigned_labels + assigned_result['assigned_bboxes'] = assigned_bboxes + assigned_result['assigned_scores'] = assigned_scores + assigned_result['fg_mask_pre_prior'] = fg_mask_pre_prior.bool() + return assigned_result + + def get_pos_mask(self, pred_bboxes: Tensor, pred_scores: Tensor, + priors: Tensor, gt_labels: Tensor, gt_bboxes: Tensor, + pad_bbox_flag: Tensor, batch_size: int, + num_gt: int) -> Tuple[Tensor, Tensor, Tensor]: + """Get possible mask. + + Args: + pred_bboxes (Tensor): Predict bboxes, + shape(batch_size, num_priors, 4) + pred_scores (Tensor): Scores of predict bbox, + shape(batch_size, num_priors, num_classes) + priors (Tensor): Model priors, shape (num_priors, 2) + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + pad_bbox_flag (Tensor): Ground truth bbox mask, + 1 means bbox, 0 means no bbox, + shape(batch_size, num_gt, 1) + batch_size (int): Batch size. + num_gt (int): Number of ground truth. + Returns: + pos_mask (Tensor): Possible mask, + shape(batch_size, num_gt, num_priors) + alignment_metrics (Tensor): Alignment metrics, + shape(batch_size, num_gt, num_priors) + overlaps (Tensor): Overlaps of gt_bboxes and pred_bboxes, + shape(batch_size, num_gt, num_priors) + """ + + # Compute alignment metric between all bbox and gt + alignment_metrics, overlaps = \ + self.get_box_metrics(pred_bboxes, pred_scores, gt_labels, + gt_bboxes, batch_size, num_gt) + + # get is_in_gts mask + is_in_gts = select_candidates_in_gts(priors, gt_bboxes) + + # get topk_metric mask + topk_metric = self.select_topk_candidates( + alignment_metrics * is_in_gts, + topk_mask=pad_bbox_flag.repeat([1, 1, self.topk]).bool()) + + # merge all mask to a final mask + pos_mask = topk_metric * is_in_gts * pad_bbox_flag + + return pos_mask, alignment_metrics, overlaps + + def get_box_metrics(self, pred_bboxes: Tensor, pred_scores: Tensor, + gt_labels: Tensor, gt_bboxes: Tensor, batch_size: int, + num_gt: int) -> Tuple[Tensor, Tensor]: + """Compute alignment metric between all bbox and gt. + + Args: + pred_bboxes (Tensor): Predict bboxes, + shape(batch_size, num_priors, 4) + pred_scores (Tensor): Scores of predict bbox, + shape(batch_size, num_priors, num_classes) + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + batch_size (int): Batch size. + num_gt (int): Number of ground truth. 
+ Returns: + alignment_metrics (Tensor): Align metric, + shape(batch_size, num_gt, num_priors) + overlaps (Tensor): Overlaps, shape(batch_size, num_gt, num_priors) + """ + pred_scores = pred_scores.permute(0, 2, 1) + gt_labels = gt_labels.to(torch.long) + idx = torch.zeros([2, batch_size, num_gt], dtype=torch.long) + idx[0] = torch.arange(end=batch_size).view(-1, 1).repeat(1, num_gt) + idx[1] = gt_labels.squeeze(-1) + bbox_scores = pred_scores[idx[0], idx[1]] + # TODO: need to replace the yolov6_iou_calculator function + if self.use_ciou: + overlaps = bbox_overlaps( + pred_bboxes.unsqueeze(1), + gt_bboxes.unsqueeze(2), + iou_mode='ciou', + bbox_format='xyxy').clamp(0) + else: + overlaps = yolov6_iou_calculator(gt_bboxes, pred_bboxes) + + alignment_metrics = bbox_scores.pow(self.alpha) * overlaps.pow( + self.beta) + + return alignment_metrics, overlaps + + def select_topk_candidates(self, + alignment_gt_metrics: Tensor, + using_largest_topk: bool = True, + topk_mask: Optional[Tensor] = None) -> Tensor: + """Compute alignment metric between all bbox and gt. + + Args: + alignment_gt_metrics (Tensor): Alignment metric of gt candidates, + shape(batch_size, num_gt, num_priors) + using_largest_topk (bool): Controls whether to using largest or + smallest elements. + topk_mask (Tensor): Topk mask, + shape(batch_size, num_gt, self.topk) + Returns: + Tensor: Topk candidates mask, + shape(batch_size, num_gt, num_priors) + """ + num_priors = alignment_gt_metrics.shape[-1] + topk_metrics, topk_idxs = torch.topk( + alignment_gt_metrics, + self.topk, + axis=-1, + largest=using_largest_topk) + if topk_mask is None: + topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > + self.eps).tile([1, 1, self.topk]) + topk_idxs = torch.where(topk_mask, topk_idxs, + torch.zeros_like(topk_idxs)) + is_in_topk = F.one_hot(topk_idxs, num_priors).sum(axis=-2) + is_in_topk = torch.where(is_in_topk > 1, torch.zeros_like(is_in_topk), + is_in_topk) + return is_in_topk.to(alignment_gt_metrics.dtype) + + def get_targets(self, gt_labels: Tensor, gt_bboxes: Tensor, + assigned_gt_idxs: Tensor, fg_mask_pre_prior: Tensor, + batch_size: int, + num_gt: int) -> Tuple[Tensor, Tensor, Tensor]: + """Get assigner info. + + Args: + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + assigned_gt_idxs (Tensor): Assigned ground truth indexes, + shape(batch_size, num_priors) + fg_mask_pre_prior (Tensor): Force ground truth matching mask, + shape(batch_size, num_priors) + batch_size (int): Batch size. + num_gt (int): Number of ground truth. 
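The task-aligned assigner above ranks every (ground truth, prior) pair by the one-line metric from `get_box_metrics`: classification score raised to the power `alpha` times IoU raised to the power `beta`. A tiny standalone sketch of that metric (names are hypothetical):

```
import torch


def task_alignment_metric(scores: torch.Tensor,
                          ious: torch.Tensor,
                          alpha: float = 1.0,
                          beta: float = 6.0) -> torch.Tensor:
    """TOOD-style alignment metric t = s**alpha * u**beta.

    Args:
        scores: classification score of each prior for the class of each gt,
            shape (num_gt, num_priors), values in [0, 1].
        ious: IoU between each gt and each predicted box,
            shape (num_gt, num_priors).
    """
    return scores.pow(alpha) * ious.pow(beta)


if __name__ == '__main__':
    s = torch.tensor([[0.9, 0.2], [0.5, 0.8]])
    u = torch.tensor([[0.6, 0.9], [0.1, 0.7]])
    # With beta=6 the IoU dominates: a confident but badly localized box
    # scores lower than a moderately confident, well localized one.
    print(task_alignment_metric(s, u))
```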
+ Returns: + assigned_labels (Tensor): Assigned labels, + shape(batch_size, num_priors) + assigned_bboxes (Tensor): Assigned bboxes, + shape(batch_size, num_priors) + assigned_scores (Tensor): Assigned scores, + shape(batch_size, num_priors) + """ + # assigned target labels + batch_ind = torch.arange( + end=batch_size, dtype=torch.int64, device=gt_labels.device)[..., + None] + assigned_gt_idxs = assigned_gt_idxs + batch_ind * num_gt + assigned_labels = gt_labels.long().flatten()[assigned_gt_idxs] + + # assigned target boxes + assigned_bboxes = gt_bboxes.reshape([-1, 4])[assigned_gt_idxs] + + # assigned target scores + assigned_labels[assigned_labels < 0] = 0 + assigned_scores = F.one_hot(assigned_labels, self.num_classes) + force_gt_scores_mask = fg_mask_pre_prior[:, :, None].repeat( + 1, 1, self.num_classes) + assigned_scores = torch.where(force_gt_scores_mask > 0, + assigned_scores, + torch.full_like(assigned_scores, 0)) + + return assigned_labels, assigned_bboxes, assigned_scores diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_yolov7_assigner.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_yolov7_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..6709968eeb1768fc4e6124f1f7a344f581dd43a7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/batch_yolov7_assigner.py @@ -0,0 +1,344 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_overlaps + + +def _cat_multi_level_tensor_in_place(*multi_level_tensor, place_hold_var): + """concat multi-level tensor in place.""" + for level_tensor in multi_level_tensor: + for i, var in enumerate(level_tensor): + if len(var) > 0: + level_tensor[i] = torch.cat(var, dim=0) + else: + level_tensor[i] = place_hold_var + + +class BatchYOLOv7Assigner(nn.Module): + """Batch YOLOv7 Assigner. + + It consists of two assigning steps: + + 1. YOLOv5 cross-grid sample assigning + 2. SimOTA assigning + + This code referenced to + https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py. + + Args: + num_classes (int): Number of classes. + num_base_priors (int): Number of base priors. + featmap_strides (Sequence[int]): Feature map strides. + prior_match_thr (float): Threshold to match priors. + Defaults to 4.0. + candidate_topk (int): Number of topk candidates to + assign. Defaults to 10. + iou_weight (float): IOU weight. Defaults to 3.0. + cls_weight (float): Class weight. Defaults to 1.0. 
+ """ + + def __init__(self, + num_classes: int, + num_base_priors: int, + featmap_strides: Sequence[int], + prior_match_thr: float = 4.0, + candidate_topk: int = 10, + iou_weight: float = 3.0, + cls_weight: float = 1.0): + super().__init__() + self.num_classes = num_classes + self.num_base_priors = num_base_priors + self.featmap_strides = featmap_strides + # yolov5 param + self.prior_match_thr = prior_match_thr + # simota param + self.candidate_topk = candidate_topk + self.iou_weight = iou_weight + self.cls_weight = cls_weight + + @torch.no_grad() + def forward(self, + pred_results, + batch_targets_normed, + batch_input_shape, + priors_base_sizes, + grid_offset, + near_neighbor_thr=0.5) -> dict: + """Forward function.""" + # (num_base_priors, num_batch_gt, 7) + # 7 is mean (batch_idx, cls_id, x_norm, y_norm, + # w_norm, h_norm, prior_idx) + + # mlvl is mean multi_level + if batch_targets_normed.shape[1] == 0: + # empty gt of batch + num_levels = len(pred_results) + return dict( + mlvl_positive_infos=[pred_results[0].new_empty( + (0, 4))] * num_levels, + mlvl_priors=[] * num_levels, + mlvl_targets_normed=[] * num_levels) + + # if near_neighbor_thr = 0.5 are mean the nearest + # 3 neighbors are also considered positive samples. + # if near_neighbor_thr = 1.0 are mean the nearest + # 5 neighbors are also considered positive samples. + mlvl_positive_infos, mlvl_priors = self.yolov5_assigner( + pred_results, + batch_targets_normed, + priors_base_sizes, + grid_offset, + near_neighbor_thr=near_neighbor_thr) + + mlvl_positive_infos, mlvl_priors, \ + mlvl_targets_normed = self.simota_assigner( + pred_results, batch_targets_normed, mlvl_positive_infos, + mlvl_priors, batch_input_shape) + + place_hold_var = batch_targets_normed.new_empty((0, 4)) + _cat_multi_level_tensor_in_place( + mlvl_positive_infos, + mlvl_priors, + mlvl_targets_normed, + place_hold_var=place_hold_var) + + return dict( + mlvl_positive_infos=mlvl_positive_infos, + mlvl_priors=mlvl_priors, + mlvl_targets_normed=mlvl_targets_normed) + + def yolov5_assigner(self, + pred_results, + batch_targets_normed, + priors_base_sizes, + grid_offset, + near_neighbor_thr=0.5): + """YOLOv5 cross-grid sample assigner.""" + num_batch_gts = batch_targets_normed.shape[1] + assert num_batch_gts > 0 + + mlvl_positive_infos, mlvl_priors = [], [] + + scaled_factor = torch.ones(7, device=pred_results[0].device) + for i in range(len(pred_results)): # lever + priors_base_sizes_i = priors_base_sizes[i] + # (1, 1, feat_shape_w, feat_shape_h, feat_shape_w, feat_shape_h) + scaled_factor[2:6] = torch.tensor( + pred_results[i].shape)[[3, 2, 3, 2]] + + # Scale batch_targets from range 0-1 to range 0-features_maps size. + # (num_base_priors, num_batch_gts, 7) + batch_targets_scaled = batch_targets_normed * scaled_factor + + # Shape match + wh_ratio = batch_targets_scaled[..., + 4:6] / priors_base_sizes_i[:, None] + match_inds = torch.max( + wh_ratio, 1. 
/ wh_ratio).max(2)[0] < self.prior_match_thr + batch_targets_scaled = batch_targets_scaled[ + match_inds] # (num_matched_target, 7) + + # no gt bbox matches anchor + if batch_targets_scaled.shape[0] == 0: + mlvl_positive_infos.append( + batch_targets_scaled.new_empty((0, 4))) + mlvl_priors.append([]) + continue + + # Positive samples with additional neighbors + batch_targets_cxcy = batch_targets_scaled[:, 2:4] + grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy + left, up = ((batch_targets_cxcy % 1 < near_neighbor_thr) & + (batch_targets_cxcy > 1)).T + right, bottom = ((grid_xy % 1 < near_neighbor_thr) & + (grid_xy > 1)).T + offset_inds = torch.stack( + (torch.ones_like(left), left, up, right, bottom)) + batch_targets_scaled = batch_targets_scaled.repeat( + (5, 1, 1))[offset_inds] # () + retained_offsets = grid_offset.repeat(1, offset_inds.shape[1], + 1)[offset_inds] + + # batch_targets_scaled: (num_matched_target, 7) + # 7 is mean (batch_idx, cls_id, x_scaled, + # y_scaled, w_scaled, h_scaled, prior_idx) + + # mlvl_positive_info: (num_matched_target, 4) + # 4 is mean (batch_idx, prior_idx, x_scaled, y_scaled) + mlvl_positive_info = batch_targets_scaled[:, [0, 6, 2, 3]] + retained_offsets = retained_offsets * near_neighbor_thr + mlvl_positive_info[:, + 2:] = mlvl_positive_info[:, + 2:] - retained_offsets + mlvl_positive_info[:, 2].clamp_(0, scaled_factor[2] - 1) + mlvl_positive_info[:, 3].clamp_(0, scaled_factor[3] - 1) + mlvl_positive_info = mlvl_positive_info.long() + priors_inds = mlvl_positive_info[:, 1] + + mlvl_positive_infos.append(mlvl_positive_info) + mlvl_priors.append(priors_base_sizes_i[priors_inds]) + + return mlvl_positive_infos, mlvl_priors + + def simota_assigner(self, pred_results, batch_targets_normed, + mlvl_positive_infos, mlvl_priors, batch_input_shape): + """SimOTA assigner.""" + num_batch_gts = batch_targets_normed.shape[1] + assert num_batch_gts > 0 + num_levels = len(mlvl_positive_infos) + + mlvl_positive_infos_matched = [[] for _ in range(num_levels)] + mlvl_priors_matched = [[] for _ in range(num_levels)] + mlvl_targets_normed_matched = [[] for _ in range(num_levels)] + + for batch_idx in range(pred_results[0].shape[0]): + # (num_batch_gt, 7) + # 7 is mean (batch_idx, cls_id, x_norm, y_norm, + # w_norm, h_norm, prior_idx) + targets_normed = batch_targets_normed[0] + # (num_gt, 7) + targets_normed = targets_normed[targets_normed[:, 0] == batch_idx] + num_gts = targets_normed.shape[0] + + if num_gts == 0: + continue + + _mlvl_decoderd_bboxes = [] + _mlvl_obj_cls = [] + _mlvl_priors = [] + _mlvl_positive_infos = [] + _from_which_layer = [] + + for i, head_pred in enumerate(pred_results): + # (num_matched_target, 4) + # 4 is mean (batch_idx, prior_idx, grid_x, grid_y) + _mlvl_positive_info = mlvl_positive_infos[i] + if _mlvl_positive_info.shape[0] == 0: + continue + + idx = (_mlvl_positive_info[:, 0] == batch_idx) + _mlvl_positive_info = _mlvl_positive_info[idx] + _mlvl_positive_infos.append(_mlvl_positive_info) + + priors = mlvl_priors[i][idx] + _mlvl_priors.append(priors) + + _from_which_layer.append( + _mlvl_positive_info.new_full( + size=(_mlvl_positive_info.shape[0], ), fill_value=i)) + + # (n,85) + level_batch_idx, prior_ind, \ + grid_x, grid_y = _mlvl_positive_info.T + pred_positive = head_pred[level_batch_idx, prior_ind, grid_y, + grid_x] + _mlvl_obj_cls.append(pred_positive[:, 4:]) + + # decoded + grid = torch.stack([grid_x, grid_y], dim=1) + pred_positive_cxcy = (pred_positive[:, :2].sigmoid() * 2. 
- + 0.5 + grid) * self.featmap_strides[i] + pred_positive_wh = (pred_positive[:, 2:4].sigmoid() * 2) ** 2 \ + * priors * self.featmap_strides[i] + pred_positive_xywh = torch.cat( + [pred_positive_cxcy, pred_positive_wh], dim=-1) + _mlvl_decoderd_bboxes.append(pred_positive_xywh) + + if len(_mlvl_decoderd_bboxes) == 0: + continue + + # 1 calc pair_wise_iou_loss + _mlvl_decoderd_bboxes = torch.cat(_mlvl_decoderd_bboxes, dim=0) + num_pred_positive = _mlvl_decoderd_bboxes.shape[0] + + if num_pred_positive == 0: + continue + + # scaled xywh + batch_input_shape_wh = pred_results[0].new_tensor( + batch_input_shape[::-1]).repeat((1, 2)) + targets_scaled_bbox = targets_normed[:, 2:6] * batch_input_shape_wh + + targets_scaled_bbox = bbox_cxcywh_to_xyxy(targets_scaled_bbox) + _mlvl_decoderd_bboxes = bbox_cxcywh_to_xyxy(_mlvl_decoderd_bboxes) + pair_wise_iou = bbox_overlaps(targets_scaled_bbox, + _mlvl_decoderd_bboxes) + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + # 2 calc pair_wise_cls_loss + _mlvl_obj_cls = torch.cat(_mlvl_obj_cls, dim=0).float().sigmoid() + _mlvl_positive_infos = torch.cat(_mlvl_positive_infos, dim=0) + _from_which_layer = torch.cat(_from_which_layer, dim=0) + _mlvl_priors = torch.cat(_mlvl_priors, dim=0) + + gt_cls_per_image = ( + F.one_hot(targets_normed[:, 1].to(torch.int64), + self.num_classes).float().unsqueeze(1).repeat( + 1, num_pred_positive, 1)) + # cls_score * obj + cls_preds_ = _mlvl_obj_cls[:, 1:]\ + .unsqueeze(0)\ + .repeat(num_gts, 1, 1) \ + * _mlvl_obj_cls[:, 0:1]\ + .unsqueeze(0).repeat(num_gts, 1, 1) + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y / (1 - y)), gt_cls_per_image, + reduction='none').sum(-1) + del cls_preds_ + + # calc cost + cost = ( + self.cls_weight * pair_wise_cls_loss + + self.iou_weight * pair_wise_iou_loss) + + # num_gt, num_match_pred + matching_matrix = torch.zeros_like(cost) + + top_k, _ = torch.topk( + pair_wise_iou, + min(self.candidate_topk, pair_wise_iou.shape[1]), + dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + # Select only topk matches per gt + for gt_idx in range(num_gts): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False) + matching_matrix[gt_idx][pos_idx] = 1.0 + del top_k, dynamic_ks + + # Each prediction box can match at most one gt box, + # and if there are more than one, + # only the least costly one can be taken + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min( + cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + targets_normed = targets_normed[matched_gt_inds] + _mlvl_positive_infos = _mlvl_positive_infos[fg_mask_inboxes] + _from_which_layer = _from_which_layer[fg_mask_inboxes] + _mlvl_priors = _mlvl_priors[fg_mask_inboxes] + + # Rearranged in the order of the prediction layers + # to facilitate loss + for i in range(num_levels): + layer_idx = _from_which_layer == i + mlvl_positive_infos_matched[i].append( + _mlvl_positive_infos[layer_idx]) + mlvl_priors_matched[i].append(_mlvl_priors[layer_idx]) + mlvl_targets_normed_matched[i].append( + targets_normed[layer_idx]) + + results = mlvl_positive_infos_matched, \ + mlvl_priors_matched, \ + mlvl_targets_normed_matched + return results diff --git 
a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/pose_sim_ota_assigner.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/pose_sim_ota_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..e66a9bf157aceceadb2f228cbbcb3ff1ddc00196 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/pose_sim_ota_assigner.py @@ -0,0 +1,210 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +from mmdet.models.task_modules.assigners import AssignResult, SimOTAAssigner +from mmdet.utils import ConfigType +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS, TASK_UTILS + +INF = 100000.0 +EPS = 1.0e-7 + + +@TASK_UTILS.register_module() +class PoseSimOTAAssigner(SimOTAAssigner): + + def __init__(self, + center_radius: float = 2.5, + candidate_topk: int = 10, + iou_weight: float = 3.0, + cls_weight: float = 1.0, + oks_weight: float = 0.0, + vis_weight: float = 0.0, + iou_calculator: ConfigType = dict(type='BboxOverlaps2D'), + oks_calculator: ConfigType = dict(type='OksLoss')): + + self.center_radius = center_radius + self.candidate_topk = candidate_topk + self.iou_weight = iou_weight + self.cls_weight = cls_weight + self.oks_weight = oks_weight + self.vis_weight = vis_weight + + self.iou_calculator = TASK_UTILS.build(iou_calculator) + self.oks_calculator = MODELS.build(oks_calculator) + + def assign(self, + pred_instances: InstanceData, + gt_instances: InstanceData, + gt_instances_ignore: Optional[InstanceData] = None, + **kwargs) -> AssignResult: + """Assign gt to priors using SimOTA. + + Args: + pred_instances (:obj:`InstanceData`): Instances of model + predictions. It includes ``priors``, and the priors can + be anchors or points, or the bboxes predicted by the + previous stage, has shape (n, 4). The bboxes predicted by + the current model or stage will be named ``bboxes``, + ``labels``, and ``scores``, the same as the ``InstanceData`` + in other places. + gt_instances (:obj:`InstanceData`): Ground truth of instance + annotations. It usually includes ``bboxes``, with shape (k, 4), + and ``labels``, with shape (k, ). + gt_instances_ignore (:obj:`InstanceData`, optional): Instances + to be ignored during training. It includes ``bboxes`` + attribute data that is ignored during training and testing. + Defaults to None. + Returns: + obj:`AssignResult`: The assigned result. 
+ """ + gt_bboxes = gt_instances.bboxes + gt_labels = gt_instances.labels + gt_keypoints = gt_instances.keypoints + gt_keypoints_visible = gt_instances.keypoints_visible + num_gt = gt_bboxes.size(0) + + decoded_bboxes = pred_instances.bboxes[..., :4] + pred_kpts = pred_instances.bboxes[..., 4:] + pred_kpts = pred_kpts.reshape(*pred_kpts.shape[:-1], -1, 3) + pred_kpts_vis = pred_kpts[..., -1] + pred_kpts = pred_kpts[..., :2] + pred_scores = pred_instances.scores + priors = pred_instances.priors + num_bboxes = decoded_bboxes.size(0) + + # assign 0 by default + assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), + 0, + dtype=torch.long) + if num_gt == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( + priors, gt_bboxes) + valid_decoded_bbox = decoded_bboxes[valid_mask] + valid_pred_scores = pred_scores[valid_mask] + valid_pred_kpts = pred_kpts[valid_mask] + valid_pred_kpts_vis = pred_kpts_vis[valid_mask] + num_valid = valid_decoded_bbox.size(0) + if num_valid == 0: + # No valid bboxes, return empty assignment + max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) + assigned_labels = decoded_bboxes.new_full((num_bboxes, ), + -1, + dtype=torch.long) + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + cost_matrix = (~is_in_boxes_and_center) * INF + + # calculate iou + pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) + if self.iou_weight > 0: + iou_cost = -torch.log(pairwise_ious + EPS) + cost_matrix = cost_matrix + iou_cost * self.iou_weight + + # calculate oks + pairwise_oks = self.oks_calculator.compute_oks( + valid_pred_kpts.unsqueeze(1), # [num_valid, -1, k, 2] + gt_keypoints.unsqueeze(0), # [1, num_gt, k, 2] + gt_keypoints_visible.unsqueeze(0), # [1, num_gt, k] + bboxes=gt_bboxes.unsqueeze(0), # [1, num_gt, 4] + ) # -> [num_valid, num_gt] + if self.oks_weight > 0: + oks_cost = -torch.log(pairwise_oks + EPS) + cost_matrix = cost_matrix + oks_cost * self.oks_weight + + # calculate cls + if self.cls_weight > 0: + gt_onehot_label = ( + F.one_hot(gt_labels.to(torch.int64), + pred_scores.shape[-1]).float().unsqueeze(0).repeat( + num_valid, 1, 1)) + + valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat( + 1, num_gt, 1) + # disable AMP autocast to avoid overflow + with torch.cuda.amp.autocast(enabled=False): + cls_cost = ( + F.binary_cross_entropy( + valid_pred_scores.to(dtype=torch.float32), + gt_onehot_label, + reduction='none', + ).sum(-1).to(dtype=valid_pred_scores.dtype)) + cost_matrix = cost_matrix + cls_cost * self.cls_weight + + # calculate vis + if self.vis_weight > 0: + valid_pred_kpts_vis = valid_pred_kpts_vis.sigmoid().unsqueeze( + 1).repeat(1, num_gt, 1) # [num_valid, 1, k] + gt_kpt_vis = gt_keypoints_visible.unsqueeze( + 0).float() # [1, num_gt, k] + with torch.cuda.amp.autocast(enabled=False): + vis_cost = ( + F.binary_cross_entropy( + valid_pred_kpts_vis.to(dtype=torch.float32), + gt_kpt_vis.repeat(num_valid, 1, 1), + reduction='none', + ).sum(-1).to(dtype=valid_pred_kpts_vis.dtype)) + cost_matrix = cost_matrix + vis_cost * self.vis_weight + + # mixed metric + pairwise_oks = pairwise_oks.pow(0.5) + matched_pred_oks, matched_gt_inds = \ + self.dynamic_k_matching( + cost_matrix, 
pairwise_ious, pairwise_oks, num_gt, valid_mask) + + # convert to AssignResult format + assigned_gt_inds[valid_mask] = matched_gt_inds + 1 + assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) + assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() + max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), + -INF, + dtype=torch.float32) + max_overlaps[valid_mask] = matched_pred_oks + return AssignResult( + num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) + + def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, + pairwise_oks: Tensor, num_gt: int, + valid_mask: Tensor) -> Tuple[Tensor, Tensor]: + """Use IoU and matching cost to calculate the dynamic top-k positive + targets.""" + matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) + # select candidate topk ious for dynamic-k calculation + candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) + topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) + # calculate dynamic k for each gt + dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) + matching_matrix[:, gt_idx][pos_idx] = 1 + + del topk_ious, dynamic_ks, pos_idx + + prior_match_gt_mask = matching_matrix.sum(1) > 1 + if prior_match_gt_mask.sum() > 0: + cost_min, cost_argmin = torch.min( + cost[prior_match_gt_mask, :], dim=1) + matching_matrix[prior_match_gt_mask, :] *= 0 + matching_matrix[prior_match_gt_mask, cost_argmin] = 1 + # get foreground mask inside box and center prior + fg_mask_inboxes = matching_matrix.sum(1) > 0 + valid_mask[valid_mask.clone()] = fg_mask_inboxes + + matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) + matched_pred_oks = (matching_matrix * + pairwise_oks).sum(1)[fg_mask_inboxes] + return matched_pred_oks, matched_gt_inds diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/utils.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5843200936ef7a269109517e6d2952cceea02059 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/assigners/utils.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Tuple + +import torch +import torch.nn.functional as F +from torch import Tensor + + +def select_candidates_in_gts(priors_points: Tensor, + gt_bboxes: Tensor, + eps: float = 1e-9) -> Tensor: + """Select the positive priors' center in gt. + + Args: + priors_points (Tensor): Model priors points, + shape(num_priors, 2) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + eps (float): Default to 1e-9. 
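Both SimOTA-style assigners above settle the final assignment with the same dynamic-k rule: each ground truth takes k (the sum of its top candidate IoUs, at least 1) of its lowest-cost priors, and a prior claimed by several ground truths keeps only its cheapest match. A compact single-image sketch in plain PyTorch, with illustrative names:

```
import torch


def dynamic_k_match(cost: torch.Tensor, ious: torch.Tensor,
                    candidate_topk: int = 10) -> torch.Tensor:
    """Dynamic-k matching for one image.

    Args:
        cost: matching cost, shape (num_priors, num_gt).
        ious: pairwise IoU, shape (num_priors, num_gt).

    Returns:
        uint8 matching matrix of shape (num_priors, num_gt).
    """
    matching = torch.zeros_like(cost, dtype=torch.uint8)
    # k for each gt = sum of its top candidate IoUs, at least 1.
    topk = min(candidate_topk, ious.size(0))
    topk_ious, _ = torch.topk(ious, topk, dim=0)
    dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
    # Each gt takes its k lowest-cost priors.
    for gt_idx in range(cost.size(1)):
        _, pos_idx = torch.topk(cost[:, gt_idx],
                                k=int(dynamic_ks[gt_idx]), largest=False)
        matching[pos_idx, gt_idx] = 1
    # A prior matched to several gts keeps only its cheapest gt.
    multi = matching.sum(1) > 1
    if multi.any():
        _, best_gt = torch.min(cost[multi, :], dim=1)
        matching[multi, :] = 0
        matching[multi, best_gt] = 1
    return matching


if __name__ == '__main__':
    n_priors, n_gt = 50, 3
    m = dynamic_k_match(torch.rand(n_priors, n_gt), torch.rand(n_priors, n_gt))
    print(m.sum(0))  # number of positives picked per gt
```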
+ Return: + (Tensor): shape(batch_size, num_gt, num_priors) + """ + batch_size, num_gt, _ = gt_bboxes.size() + gt_bboxes = gt_bboxes.reshape([-1, 4]) + + priors_number = priors_points.size(0) + priors_points = priors_points.unsqueeze(0).repeat(batch_size * num_gt, 1, + 1) + + # calculate the left, top, right, bottom distance between positive + # prior center and gt side + gt_bboxes_lt = gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, priors_number, 1) + gt_bboxes_rb = gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, priors_number, 1) + bbox_deltas = torch.cat( + [priors_points - gt_bboxes_lt, gt_bboxes_rb - priors_points], dim=-1) + bbox_deltas = bbox_deltas.reshape([batch_size, num_gt, priors_number, -1]) + + return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype) + + +def select_highest_overlaps(pos_mask: Tensor, overlaps: Tensor, + num_gt: int) -> Tuple[Tensor, Tensor, Tensor]: + """If an anchor box is assigned to multiple gts, the one with the highest + iou will be selected. + + Args: + pos_mask (Tensor): The assigned positive sample mask, + shape(batch_size, num_gt, num_priors) + overlaps (Tensor): IoU between all bbox and ground truth, + shape(batch_size, num_gt, num_priors) + num_gt (int): Number of ground truth. + Return: + gt_idx_pre_prior (Tensor): Target ground truth index, + shape(batch_size, num_priors) + fg_mask_pre_prior (Tensor): Force matching ground truth, + shape(batch_size, num_priors) + pos_mask (Tensor): The assigned positive sample mask, + shape(batch_size, num_gt, num_priors) + """ + fg_mask_pre_prior = pos_mask.sum(axis=-2) + + # Make sure the positive sample matches the only one and is the largest IoU + if fg_mask_pre_prior.max() > 1: + mask_multi_gts = (fg_mask_pre_prior.unsqueeze(1) > 1).repeat( + [1, num_gt, 1]) + index = overlaps.argmax(axis=1) + is_max_overlaps = F.one_hot(index, num_gt) + is_max_overlaps = \ + is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype) + + pos_mask = torch.where(mask_multi_gts, is_max_overlaps, pos_mask) + fg_mask_pre_prior = pos_mask.sum(axis=-2) + + gt_idx_pre_prior = pos_mask.argmax(axis=-2) + return gt_idx_pre_prior, fg_mask_pre_prior, pos_mask + + +# TODO:'mmdet.BboxOverlaps2D' will cause gradient inconsistency, +# which will be found and solved in a later version. +def yolov6_iou_calculator(bbox1: Tensor, + bbox2: Tensor, + eps: float = 1e-9) -> Tensor: + """Calculate iou for batch. + + Args: + bbox1 (Tensor): shape(batch size, num_gt, 4) + bbox2 (Tensor): shape(batch size, num_priors, 4) + eps (float): Default to 1e-9. 
+ Return: + (Tensor): IoU, shape(size, num_gt, num_priors) + """ + bbox1 = bbox1.unsqueeze(2) # [N, M1, 4] -> [N, M1, 1, 4] + bbox2 = bbox2.unsqueeze(1) # [N, M2, 4] -> [N, 1, M2, 4] + + # calculate xy info of predict and gt bbox + bbox1_x1y1, bbox1_x2y2 = bbox1[:, :, :, 0:2], bbox1[:, :, :, 2:4] + bbox2_x1y1, bbox2_x2y2 = bbox2[:, :, :, 0:2], bbox2[:, :, :, 2:4] + + # calculate overlap area + overlap = (torch.minimum(bbox1_x2y2, bbox2_x2y2) - + torch.maximum(bbox1_x1y1, bbox2_x1y1)).clip(0).prod(-1) + + # calculate bbox area + bbox1_area = (bbox1_x2y2 - bbox1_x1y1).clip(0).prod(-1) + bbox2_area = (bbox2_x2y2 - bbox2_x1y1).clip(0).prod(-1) + + union = bbox1_area + bbox2_area - overlap + eps + + return overlap / union diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75b6e7d6b30afd3de21c738dfc8e75df2eae7120 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .distance_angle_point_coder import DistanceAnglePointCoder +from .distance_point_bbox_coder import DistancePointBBoxCoder +from .yolov5_bbox_coder import YOLOv5BBoxCoder +from .yolox_bbox_coder import YOLOXBBoxCoder + +__all__ = [ + 'YOLOv5BBoxCoder', 'YOLOXBBoxCoder', 'DistancePointBBoxCoder', + 'DistanceAnglePointCoder' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_angle_point_coder.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_angle_point_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e322f94725ee548c9b261be6f5bae2f3d9b4d9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_angle_point_coder.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Union + +import torch + +from mmyolo.registry import TASK_UTILS + +try: + from mmrotate.models.task_modules.coders import \ + DistanceAnglePointCoder as MMROTATE_DistanceAnglePointCoder + MMROTATE_AVAILABLE = True +except ImportError: + from mmdet.models.task_modules.coders import BaseBBoxCoder + MMROTATE_DistanceAnglePointCoder = BaseBBoxCoder + MMROTATE_AVAILABLE = False + + +@TASK_UTILS.register_module() +class DistanceAnglePointCoder(MMROTATE_DistanceAnglePointCoder): + """Distance Angle Point BBox coder. + + This coder encodes gt bboxes (x, y, w, h, theta) into (top, bottom, left, + right, theta) and decode it back to the original. + """ + + def __init__(self, clip_border=True, angle_version='oc'): + if not MMROTATE_AVAILABLE: + raise ImportError( + 'Please run "mim install -r requirements/mmrotate.txt" ' + 'to install mmrotate first for rotated detection.') + + super().__init__(clip_border=clip_border, angle_version=angle_version) + + def decode( + self, + points: torch.Tensor, + pred_bboxes: torch.Tensor, + stride: torch.Tensor, + max_shape: Optional[Union[Sequence[int], torch.Tensor, + Sequence[Sequence[int]]]] = None, + ) -> torch.Tensor: + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2). + pred_bboxes (Tensor): Distance from the given point to 4 + boundaries and angle (left, top, right, bottom, angle). 
+ Shape (B, N, 5) or (N, 5) + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If priors shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]], + and the length of max_shape should also be B. + Default None. + Returns: + Tensor: Boxes with shape (N, 5) or (B, N, 5) + """ + assert points.size(-2) == pred_bboxes.size(-2) + assert points.size(-1) == 2 + assert pred_bboxes.size(-1) == 5 + if self.clip_border is False: + max_shape = None + + if pred_bboxes.dim() == 2: + stride = stride[:, None] + else: + stride = stride[None, :, None] + pred_bboxes[..., :4] = pred_bboxes[..., :4] * stride + + return self.distance2obb(points, pred_bboxes, max_shape, + self.angle_version) + + def encode(self, + points: torch.Tensor, + gt_bboxes: torch.Tensor, + max_dis: float = 16., + eps: float = 0.01) -> torch.Tensor: + """Encode bounding box to distances. + + Args: + points (Tensor): Shape (N, 2), The format is [x, y]. + gt_bboxes (Tensor): Shape (N, 5), The format is "xywha" + max_dis (float): Upper bound of the distance. Default None. + eps (float): a small value to ensure target < max_dis, instead <=. + Default 0.1. + + Returns: + Tensor: Box transformation deltas. The shape is (N, 5). + """ + + assert points.size(-2) == gt_bboxes.size(-2) + assert points.size(-1) == 2 + assert gt_bboxes.size(-1) == 5 + return self.obb2distance(points, gt_bboxes, max_dis, eps) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_point_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_point_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..16417b8ab209c57880cfcfe0ba2a955e78c0a3f0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/distance_point_bbox_coder.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Union + +import torch +from mmdet.models.task_modules.coders import \ + DistancePointBBoxCoder as MMDET_DistancePointBBoxCoder +from mmdet.structures.bbox import bbox2distance, distance2bbox + +from mmyolo.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class DistancePointBBoxCoder(MMDET_DistancePointBBoxCoder): + """Distance Point BBox coder. + + This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, + right) and decode it back to the original. + """ + + def decode( + self, + points: torch.Tensor, + pred_bboxes: torch.Tensor, + stride: torch.Tensor, + max_shape: Optional[Union[Sequence[int], torch.Tensor, + Sequence[Sequence[int]]]] = None + ) -> torch.Tensor: + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2). + pred_bboxes (Tensor): Distance from the given point to 4 + boundaries (left, top, right, bottom). Shape (B, N, 4) + or (N, 4) + stride (Tensor): Featmap stride. + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). If priors shape is (B, N, 4), then + the max_shape should be a Sequence[Sequence[int]], + and the length of max_shape should also be B. + Default None. 
+ Returns: + Tensor: Boxes with shape (N, 4) or (B, N, 4) + """ + assert points.size(-2) == pred_bboxes.size(-2) + assert points.size(-1) == 2 + assert pred_bboxes.size(-1) == 4 + if self.clip_border is False: + max_shape = None + + pred_bboxes = pred_bboxes * stride[None, :, None] + + return distance2bbox(points, pred_bboxes, max_shape) + + def encode(self, + points: torch.Tensor, + gt_bboxes: torch.Tensor, + max_dis: float = 16., + eps: float = 0.01) -> torch.Tensor: + """Encode bounding box to distances. The rewrite is to support batch + operations. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2), The format is [x, y]. + gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format + is "xyxy" + max_dis (float): Upper bound of the distance. Default to 16.. + eps (float): a small value to ensure target < max_dis, instead <=. + Default 0.01. + + Returns: + Tensor: Box transformation deltas. The shape is (N, 4) or + (B, N, 4). + """ + + assert points.size(-2) == gt_bboxes.size(-2) + assert points.size(-1) == 2 + assert gt_bboxes.size(-1) == 4 + return bbox2distance(points, gt_bboxes, max_dis, eps) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolov5_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolov5_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..bab5f0e0fe06c1930497bdece7c7a06636fe9c37 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolov5_bbox_coder.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +from mmdet.models.task_modules.coders.base_bbox_coder import BaseBBoxCoder + +from mmyolo.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class YOLOv5BBoxCoder(BaseBBoxCoder): + """YOLOv5 BBox coder. + + This decoder decodes pred bboxes (delta_x, delta_x, w, h) to bboxes (tl_x, + tl_y, br_x, br_y). + """ + + def encode(self, **kwargs): + """Encode deltas between bboxes and ground truth boxes.""" + pass + + def decode(self, priors: torch.Tensor, pred_bboxes: torch.Tensor, + stride: Union[torch.Tensor, int]) -> torch.Tensor: + """Decode regression results (delta_x, delta_x, w, h) to bboxes (tl_x, + tl_y, br_x, br_y). + + Args: + priors (torch.Tensor): Basic boxes or points, e.g. anchors. + pred_bboxes (torch.Tensor): Encoded boxes with shape + stride (torch.Tensor | int): Strides of bboxes. + + Returns: + torch.Tensor: Decoded boxes. 
+ """ + assert pred_bboxes.size(-1) == priors.size(-1) == 4 + + pred_bboxes = pred_bboxes.sigmoid() + + x_center = (priors[..., 0] + priors[..., 2]) * 0.5 + y_center = (priors[..., 1] + priors[..., 3]) * 0.5 + w = priors[..., 2] - priors[..., 0] + h = priors[..., 3] - priors[..., 1] + + # The anchor of mmdet has been offset by 0.5 + x_center_pred = (pred_bboxes[..., 0] - 0.5) * 2 * stride + x_center + y_center_pred = (pred_bboxes[..., 1] - 0.5) * 2 * stride + y_center + w_pred = (pred_bboxes[..., 2] * 2)**2 * w + h_pred = (pred_bboxes[..., 3] * 2)**2 * h + + decoded_bboxes = torch.stack( + (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2, + x_center_pred + w_pred / 2, y_center_pred + h_pred / 2), + dim=-1) + + return decoded_bboxes diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolox_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolox_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..02c898d814e89e5c8ef4db792831a7ba80c7c0cc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/task_modules/coders/yolox_bbox_coder.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +from mmdet.models.task_modules.coders.base_bbox_coder import BaseBBoxCoder + +from mmyolo.registry import TASK_UTILS + + +@TASK_UTILS.register_module() +class YOLOXBBoxCoder(BaseBBoxCoder): + """YOLOX BBox coder. + + This decoder decodes pred bboxes (delta_x, delta_x, w, h) to bboxes (tl_x, + tl_y, br_x, br_y). + """ + + def encode(self, **kwargs): + """Encode deltas between bboxes and ground truth boxes.""" + pass + + def decode(self, priors: torch.Tensor, pred_bboxes: torch.Tensor, + stride: Union[torch.Tensor, int]) -> torch.Tensor: + """Decode regression results (delta_x, delta_x, w, h) to bboxes (tl_x, + tl_y, br_x, br_y). + + Args: + priors (torch.Tensor): Basic boxes or points, e.g. anchors. + pred_bboxes (torch.Tensor): Encoded boxes with shape + stride (torch.Tensor | int): Strides of bboxes. + + Returns: + torch.Tensor: Decoded boxes. + """ + stride = stride[None, :, None] + xys = (pred_bboxes[..., :2] * stride) + priors + whs = pred_bboxes[..., 2:].exp() * stride + + tl_x = (xys[..., 0] - whs[..., 0] / 2) + tl_y = (xys[..., 1] - whs[..., 1] / 2) + br_x = (xys[..., 0] + whs[..., 0] / 2) + br_y = (xys[..., 1] + whs[..., 1] / 2) + + decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) + return decoded_bboxes diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d62ff80e25ea5adad8524fd6f756f1db5e4de4d5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
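+# A quick illustrative reference for the scaling helpers re-exported here from
+# .misc (the example values follow directly from their definitions; they are
+# only a sketch, not part of the upstream module API):
+#   make_divisible(64, widen_factor=0.5)  -> 32   # ceil(64 * 0.5 / 8) * 8
+#   make_round(3, deepen_factor=0.33)     -> 1    # max(round(3 * 0.33), 1)
+# These helpers are typically used to scale channel widths (`widen_factor`)
+# and block counts (`deepen_factor`) when building YOLO-style configs.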
+from .misc import (OutputSaveFunctionWrapper, OutputSaveObjectWrapper, + gt_instances_preprocess, make_divisible, make_round) + +__all__ = [ + 'make_divisible', 'make_round', 'gt_instances_preprocess', + 'OutputSaveFunctionWrapper', 'OutputSaveObjectWrapper' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/misc.py b/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..96cd1195aefb2fbf5db7535be785dae2fab4add9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/models/utils/misc.py @@ -0,0 +1,186 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from collections import defaultdict +from copy import deepcopy +from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union + +import torch +from mmdet.structures.bbox.transforms import get_box_tensor +from torch import Tensor + + +def make_divisible(x: float, + widen_factor: float = 1.0, + divisor: int = 8) -> int: + """Make sure that x*widen_factor is divisible by divisor.""" + return math.ceil(x * widen_factor / divisor) * divisor + + +def make_round(x: float, deepen_factor: float = 1.0) -> int: + """Make sure that x*deepen_factor becomes an integer not less than 1.""" + return max(round(x * deepen_factor), 1) if x > 1 else x + + +def gt_instances_preprocess(batch_gt_instances: Union[Tensor, Sequence], + batch_size: int) -> Tensor: + """Split batch_gt_instances with batch size. + + From [all_gt_bboxes, box_dim+2] to [batch_size, number_gt, box_dim+1]. + For horizontal box, box_dim=4, for rotated box, box_dim=5 + + If some shape of single batch smaller than + gt bbox len, then using zeros to fill. + + Args: + batch_gt_instances (Sequence[Tensor]): Ground truth + instances for whole batch, shape [all_gt_bboxes, box_dim+2] + batch_size (int): Batch size. 
+ + Returns: + Tensor: batch gt instances data, shape + [batch_size, number_gt, box_dim+1] + """ + if isinstance(batch_gt_instances, Sequence): + max_gt_bbox_len = max( + [len(gt_instances) for gt_instances in batch_gt_instances]) + # fill zeros with length box_dim+1 if some shape of + # single batch not equal max_gt_bbox_len + batch_instance_list = [] + for index, gt_instance in enumerate(batch_gt_instances): + bboxes = gt_instance.bboxes + labels = gt_instance.labels + box_dim = get_box_tensor(bboxes).size(-1) + batch_instance_list.append( + torch.cat((labels[:, None], bboxes), dim=-1)) + + if bboxes.shape[0] >= max_gt_bbox_len: + continue + + fill_tensor = bboxes.new_full( + [max_gt_bbox_len - bboxes.shape[0], box_dim + 1], 0) + batch_instance_list[index] = torch.cat( + (batch_instance_list[index], fill_tensor), dim=0) + + return torch.stack(batch_instance_list) + else: + # faster version + # format of batch_gt_instances: [img_ind, cls_ind, (box)] + # For example horizontal box should be: + # [img_ind, cls_ind, x1, y1, x2, y2] + # Rotated box should be + # [img_ind, cls_ind, x, y, w, h, a] + + # sqlit batch gt instance [all_gt_bboxes, box_dim+2] -> + # [batch_size, max_gt_bbox_len, box_dim+1] + assert isinstance(batch_gt_instances, Tensor) + box_dim = batch_gt_instances.size(-1) - 2 + if len(batch_gt_instances) > 0: + gt_images_indexes = batch_gt_instances[:, 0] + max_gt_bbox_len = gt_images_indexes.unique( + return_counts=True)[1].max() + # fill zeros with length box_dim+1 if some shape of + # single batch not equal max_gt_bbox_len + batch_instance = torch.zeros( + (batch_size, max_gt_bbox_len, box_dim + 1), + dtype=batch_gt_instances.dtype, + device=batch_gt_instances.device) + + for i in range(batch_size): + match_indexes = gt_images_indexes == i + gt_num = match_indexes.sum() + if gt_num: + batch_instance[i, :gt_num] = batch_gt_instances[ + match_indexes, 1:] + else: + batch_instance = torch.zeros((batch_size, 0, box_dim + 1), + dtype=batch_gt_instances.dtype, + device=batch_gt_instances.device) + + return batch_instance + + +class OutputSaveObjectWrapper: + """A wrapper class that saves the output of function calls on an object.""" + + def __init__(self, obj: Any) -> None: + self.obj = obj + self.log = defaultdict(list) + + def __getattr__(self, attr: str) -> Any: + """Overrides the default behavior when an attribute is accessed. + + - If the attribute is callable, hooks the attribute and saves the + returned value of the function call to the log. + - If the attribute is not callable, saves the attribute's value to the + log and returns the value. + """ + orig_attr = getattr(self.obj, attr) + + if not callable(orig_attr): + self.log[attr].append(orig_attr) + return orig_attr + + def hooked(*args: Tuple, **kwargs: Dict) -> Any: + """The hooked function that logs the return value of the original + function.""" + result = orig_attr(*args, **kwargs) + self.log[attr].append(result) + return result + + return hooked + + def clear(self): + """Clears the log of function call outputs.""" + self.log.clear() + + def __deepcopy__(self, memo): + """Only copy the object when applying deepcopy.""" + other = type(self)(deepcopy(self.obj)) + memo[id(self)] = other + return other + + +class OutputSaveFunctionWrapper: + """A class that wraps a function and saves its outputs. + + This class can be used to decorate a function to save its outputs. It wraps + the function with a `__call__` method that calls the original function and + saves the results in a log attribute. 
+ Args: + func (Callable): A function to wrap. + spec (Optional[Dict]): A dictionary of global variables to use as the + namespace for the wrapper. If `None`, the global namespace of the + original function is used. + """ + + def __init__(self, func: Callable, spec: Optional[Dict]) -> None: + """Initializes the OutputSaveFunctionWrapper instance.""" + assert callable(func) + self.log = [] + self.func = func + self.func_name = func.__name__ + + if isinstance(spec, dict): + self.spec = spec + elif hasattr(func, '__globals__'): + self.spec = func.__globals__ + else: + raise ValueError + + def __call__(self, *args, **kwargs) -> Any: + """Calls the wrapped function with the given arguments and saves the + results in the `log` attribute.""" + results = self.func(*args, **kwargs) + self.log.append(results) + return results + + def __enter__(self) -> None: + """Enters the context and sets the wrapped function to be a global + variable in the specified namespace.""" + self.spec[self.func_name] = self + return self.log + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + """Exits the context and resets the wrapped function to its original + value in the specified namespace.""" + self.spec[self.func_name] = self.func diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/registry.py b/models/YOLO-World/third_party/mmyolo/mmyolo/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..71f43e6cf53d92917b7aea6175ae0540613ff720 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/registry.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""MMYOLO provides 17 registry nodes to support using modules across projects. +Each node is a child of the root registry in MMEngine. + +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. 
+""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry( + 'runner', parent=MMENGINE_RUNNERS, locations=['mmyolo.engine']) +# manage runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', + parent=MMENGINE_RUNNER_CONSTRUCTORS, + locations=['mmyolo.engine']) +# manage all kinds of loops like `EpochBasedTrainLoop` +LOOPS = Registry('loop', parent=MMENGINE_LOOPS, locations=['mmyolo.engine']) +# manage all kinds of hooks like `CheckpointHook` +HOOKS = Registry( + 'hook', parent=MMENGINE_HOOKS, locations=['mmyolo.engine.hooks']) + +# manage data-related modules +DATASETS = Registry( + 'dataset', parent=MMENGINE_DATASETS, locations=['mmyolo.datasets']) +DATA_SAMPLERS = Registry( + 'data sampler', + parent=MMENGINE_DATA_SAMPLERS, + locations=['mmyolo.datasets']) +TRANSFORMS = Registry( + 'transform', + parent=MMENGINE_TRANSFORMS, + locations=['mmyolo.datasets.transforms']) + +# manage all kinds of modules inheriting `nn.Module` +MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmyolo.models']) +# manage all kinds of model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry( + 'model_wrapper', + parent=MMENGINE_MODEL_WRAPPERS, + locations=['mmyolo.models']) +# manage all kinds of weight initialization modules like `Uniform` +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', + parent=MMENGINE_WEIGHT_INITIALIZERS, + locations=['mmyolo.models']) + +# manage all kinds of optimizers like `SGD` and `Adam` +OPTIMIZERS = Registry( + 'optimizer', + parent=MMENGINE_OPTIMIZERS, + locations=['mmyolo.engine.optimizers']) +OPTIM_WRAPPERS = Registry( + 'optim_wrapper', + parent=MMENGINE_OPTIM_WRAPPERS, + locations=['mmyolo.engine.optimizers']) +# manage constructors that customize the optimization hyperparameters. 
+OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, + locations=['mmyolo.engine.optimizers']) +# manage all kinds of parameter schedulers like `MultiStepLR` +PARAM_SCHEDULERS = Registry( + 'parameter scheduler', + parent=MMENGINE_PARAM_SCHEDULERS, + locations=['mmyolo.engine.optimizers']) +# manage all kinds of metrics +METRICS = Registry( + 'metric', parent=MMENGINE_METRICS, locations=['mmyolo.engine']) + +# manage task-specific modules like anchor generators and box coders +TASK_UTILS = Registry( + 'task util', parent=MMENGINE_TASK_UTILS, locations=['mmyolo.models']) + +# manage visualizer +VISUALIZERS = Registry( + 'visualizer', parent=MMENGINE_VISUALIZERS, locations=['mmyolo.utils']) +# manage visualizer backend +VISBACKENDS = Registry( + 'vis_backend', parent=MMENGINE_VISBACKENDS, locations=['mmyolo.utils']) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/testing/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6d7a010ee27b2822d44ad099f46f65bf6f0c00a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/testing/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ._utils import get_detector_cfg + +__all__ = ['get_detector_cfg'] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/testing/_utils.py b/models/YOLO-World/third_party/mmyolo/mmyolo/testing/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9ccf2fe0cfd7baa3aeb7f3793c3db025d8889d5f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/testing/_utils.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from os.path import dirname, exists, join + +import numpy as np +from mmengine.config import Config + + +def _get_config_directory(): + """Find the predefined detector config directory.""" + try: + # Assume we are running in the source mmyolo repo + repo_dpath = dirname(dirname(dirname(__file__))) + except NameError: + # For IPython development when this __file__ is not defined + import mmyolo + repo_dpath = dirname(dirname(mmyolo.__file__)) + config_dpath = join(repo_dpath, 'configs') + if not exists(config_dpath): + raise Exception('Cannot find config path') + return config_dpath + + +def _get_config_module(fname): + """Load a configuration as a python module.""" + config_dpath = _get_config_directory() + config_fpath = join(config_dpath, fname) + config_mod = Config.fromfile(config_fpath) + return config_mod + + +def get_detector_cfg(fname): + """Grab configs necessary to create a detector. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. 
+ """ + config = _get_config_module(fname) + model = copy.deepcopy(config.model) + return model + + +def _rand_bboxes(rng, num_boxes, w, h): + """Randomly generate a specified number of bboxes.""" + cx, cy, bw, bh = rng.rand(num_boxes, 4).T + + tl_x = ((cx * w) - (w * bw / 2)).clip(0, w) + tl_y = ((cy * h) - (h * bh / 2)).clip(0, h) + br_x = ((cx * w) + (w * bw / 2)).clip(0, w) + br_y = ((cy * h) + (h * bh / 2)).clip(0, h) + + bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T + return bboxes diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/__init__.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4e968494892ccefb60d0c7b713c131ddc6fb869 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collect_env import collect_env +from .misc import is_metainfo_lower, switch_to_deploy +from .setup_env import register_all_modules + +__all__ = [ + 'register_all_modules', 'collect_env', 'switch_to_deploy', + 'is_metainfo_lower' +] diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/boxam_utils.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/boxam_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..50d6c09ecd309abe11777b4bc5307db0bbec2735 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/boxam_utils.py @@ -0,0 +1,517 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import copy +import warnings +from pathlib import Path +from typing import Callable, List, Optional, Tuple, Union + +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torchvision +from mmcv.transforms import Compose +from mmdet.evaluation import get_classes +from mmdet.utils import ConfigType +from mmengine.config import Config +from mmengine.registry import init_default_scope +from mmengine.runner import load_checkpoint +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import MODELS + +try: + from pytorch_grad_cam import (AblationCAM, AblationLayer, + ActivationsAndGradients) + from pytorch_grad_cam import GradCAM as Base_GradCAM + from pytorch_grad_cam import GradCAMPlusPlus as Base_GradCAMPlusPlus + from pytorch_grad_cam.base_cam import BaseCAM + from pytorch_grad_cam.utils.image import scale_cam_image, show_cam_on_image + from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection +except ImportError: + pass + + +def init_detector( + config: Union[str, Path, Config], + checkpoint: Optional[str] = None, + palette: str = 'coco', + device: str = 'cuda:0', + cfg_options: Optional[dict] = None, +) -> nn.Module: + """Initialize a detector from config file. + + Args: + config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, + :obj:`Path`, or the config object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + palette (str): Color palette used for visualization. If palette + is stored in checkpoint, use checkpoint's palette first, otherwise + use externally passed palette. Currently, supports 'coco', 'voc', + 'citys' and 'random'. Defaults to coco. + device (str): The device where the anchors will be put on. + Defaults to cuda:0. + cfg_options (dict, optional): Options to override some settings in + the used config. + + Returns: + nn.Module: The constructed detector. 
+ """ + if isinstance(config, (str, Path)): + config = Config.fromfile(config) + elif not isinstance(config, Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if cfg_options is not None: + config.merge_from_dict(cfg_options) + elif 'init_cfg' in config.model.backbone: + config.model.backbone.init_cfg = None + + # only change this + # grad based method requires train_cfg + # config.model.train_cfg = None + init_default_scope(config.get('default_scope', 'mmyolo')) + + model = MODELS.build(config.model) + if checkpoint is not None: + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + # Weights converted from elsewhere may not have meta fields. + checkpoint_meta = checkpoint.get('meta', {}) + # save the dataset_meta in the model for convenience + if 'dataset_meta' in checkpoint_meta: + # mmdet 3.x, all keys should be lowercase + model.dataset_meta = { + k.lower(): v + for k, v in checkpoint_meta['dataset_meta'].items() + } + elif 'CLASSES' in checkpoint_meta: + # < mmdet 3.x + classes = checkpoint_meta['CLASSES'] + model.dataset_meta = {'classes': classes, 'palette': palette} + else: + warnings.simplefilter('once') + warnings.warn( + 'dataset_meta or class names are not saved in the ' + 'checkpoint\'s meta data, use COCO classes by default.') + model.dataset_meta = { + 'classes': get_classes('coco'), + 'palette': palette + } + + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +def reshape_transform(feats: Union[Tensor, List[Tensor]], + max_shape: Tuple[int, int] = (20, 20), + is_need_grad: bool = False): + """Reshape and aggregate feature maps when the input is a multi-layer + feature map. + + Takes these tensors with different sizes, resizes them to a common shape, + and concatenates them. 
+ """ + if len(max_shape) == 1: + max_shape = max_shape * 2 + + if isinstance(feats, torch.Tensor): + feats = [feats] + else: + if is_need_grad: + raise NotImplementedError('The `grad_base` method does not ' + 'support output multi-activation layers') + + max_h = max([im.shape[-2] for im in feats]) + max_w = max([im.shape[-1] for im in feats]) + if -1 in max_shape: + max_shape = (max_h, max_w) + else: + max_shape = (min(max_h, max_shape[0]), min(max_w, max_shape[1])) + + activations = [] + for feat in feats: + activations.append( + torch.nn.functional.interpolate( + torch.abs(feat), max_shape, mode='bilinear')) + + activations = torch.cat(activations, axis=1) + return activations + + +class BoxAMDetectorWrapper(nn.Module): + """Wrap the mmdet model class to facilitate handling of non-tensor + situations during inference.""" + + def __init__(self, + cfg: ConfigType, + checkpoint: str, + score_thr: float, + device: str = 'cuda:0'): + super().__init__() + self.cfg = cfg + self.device = device + self.score_thr = score_thr + self.checkpoint = checkpoint + self.detector = init_detector(self.cfg, self.checkpoint, device=device) + + pipeline_cfg = copy.deepcopy(self.cfg.test_dataloader.dataset.pipeline) + pipeline_cfg[0].type = 'mmdet.LoadImageFromNDArray' + + new_test_pipeline = [] + for pipeline in pipeline_cfg: + if not pipeline['type'].endswith('LoadAnnotations'): + new_test_pipeline.append(pipeline) + self.test_pipeline = Compose(new_test_pipeline) + + self.is_need_loss = False + self.input_data = None + self.image = None + + def need_loss(self, is_need_loss: bool): + """Grad-based methods require loss.""" + self.is_need_loss = is_need_loss + + def set_input_data(self, + image: np.ndarray, + pred_instances: Optional[InstanceData] = None): + """Set the input data to be used in the next step.""" + self.image = image + + if self.is_need_loss: + assert pred_instances is not None + pred_instances = pred_instances.numpy() + data = dict( + img=self.image, + img_id=0, + gt_bboxes=pred_instances.bboxes, + gt_bboxes_labels=pred_instances.labels) + data = self.test_pipeline(data) + else: + data = dict(img=self.image, img_id=0) + data = self.test_pipeline(data) + data['inputs'] = [data['inputs']] + data['data_samples'] = [data['data_samples']] + self.input_data = data + + def __call__(self, *args, **kwargs): + assert self.input_data is not None + if self.is_need_loss: + # Maybe this is a direction that can be optimized + # self.detector.init_weights() + if hasattr(self.detector.bbox_head, 'head_module'): + self.detector.bbox_head.head_module.training = True + else: + self.detector.bbox_head.training = True + if hasattr(self.detector.bbox_head, 'featmap_sizes'): + # Prevent the model algorithm error when calculating loss + self.detector.bbox_head.featmap_sizes = None + + data_ = {} + data_['inputs'] = [self.input_data['inputs']] + data_['data_samples'] = [self.input_data['data_samples']] + data = self.detector.data_preprocessor(data_, training=False) + loss = self.detector._run_forward(data, mode='loss') + + if hasattr(self.detector.bbox_head, 'featmap_sizes'): + self.detector.bbox_head.featmap_sizes = None + + return [loss] + else: + if hasattr(self.detector.bbox_head, 'head_module'): + self.detector.bbox_head.head_module.training = False + else: + self.detector.bbox_head.training = False + with torch.no_grad(): + results = self.detector.test_step(self.input_data) + return results + + +class BoxAMDetectorVisualizer: + """Box AM visualization class.""" + + def __init__(self, + method_class, + model: 
nn.Module, + target_layers: List, + reshape_transform: Optional[Callable] = None, + is_need_grad: bool = False, + extra_params: Optional[dict] = None): + self.target_layers = target_layers + self.reshape_transform = reshape_transform + self.is_need_grad = is_need_grad + + if method_class.__name__ == 'AblationCAM': + batch_size = extra_params.get('batch_size', 1) + ratio_channels_to_ablate = extra_params.get( + 'ratio_channels_to_ablate', 1.) + self.cam = AblationCAM( + model, + target_layers, + use_cuda=True if 'cuda' in model.device else False, + reshape_transform=reshape_transform, + batch_size=batch_size, + ablation_layer=extra_params['ablation_layer'], + ratio_channels_to_ablate=ratio_channels_to_ablate) + else: + self.cam = method_class( + model, + target_layers, + use_cuda=True if 'cuda' in model.device else False, + reshape_transform=reshape_transform, + ) + if self.is_need_grad: + self.cam.activations_and_grads.release() + + self.classes = model.detector.dataset_meta['classes'] + self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3)) + + def switch_activations_and_grads(self, model) -> None: + """In the grad-based method, we need to switch + ``ActivationsAndGradients`` layer, otherwise an error will occur.""" + self.cam.model = model + + if self.is_need_grad is True: + self.cam.activations_and_grads = ActivationsAndGradients( + model, self.target_layers, self.reshape_transform) + self.is_need_grad = False + else: + self.cam.activations_and_grads.release() + self.is_need_grad = True + + def __call__(self, img, targets, aug_smooth=False, eigen_smooth=False): + img = torch.from_numpy(img)[None].permute(0, 3, 1, 2) + return self.cam(img, targets, aug_smooth, eigen_smooth)[0, :] + + def show_am(self, + image: np.ndarray, + pred_instance: InstanceData, + grayscale_am: np.ndarray, + with_norm_in_bboxes: bool = False): + """Normalize the AM to be in the range [0, 1] inside every bounding + boxes, and zero outside of the bounding boxes.""" + + boxes = pred_instance.bboxes + labels = pred_instance.labels + + if with_norm_in_bboxes is True: + boxes = boxes.astype(np.int32) + renormalized_am = np.zeros(grayscale_am.shape, dtype=np.float32) + images = [] + for x1, y1, x2, y2 in boxes: + img = renormalized_am * 0 + img[y1:y2, x1:x2] = scale_cam_image( + [grayscale_am[y1:y2, x1:x2].copy()])[0] + images.append(img) + + renormalized_am = np.max(np.float32(images), axis=0) + renormalized_am = scale_cam_image([renormalized_am])[0] + else: + renormalized_am = grayscale_am + + am_image_renormalized = show_cam_on_image( + image / 255, renormalized_am, use_rgb=False) + + image_with_bounding_boxes = self._draw_boxes( + boxes, labels, am_image_renormalized, pred_instance.get('scores')) + return image_with_bounding_boxes + + def _draw_boxes(self, + boxes: List, + labels: List, + image: np.ndarray, + scores: Optional[List] = None): + """draw boxes on image.""" + for i, box in enumerate(boxes): + label = labels[i] + color = self.COLORS[label] + cv2.rectangle(image, (int(box[0]), int(box[1])), + (int(box[2]), int(box[3])), color, 2) + if scores is not None: + score = scores[i] + text = str(self.classes[label]) + ': ' + str( + round(score * 100, 1)) + else: + text = self.classes[label] + + cv2.putText( + image, + text, (int(box[0]), int(box[1] - 5)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + color, + 1, + lineType=cv2.LINE_AA) + return image + + +class DetAblationLayer(AblationLayer): + """Det AblationLayer.""" + + def __init__(self): + super().__init__() + self.activations = None + + def 
set_next_batch(self, input_batch_index, activations, + num_channels_to_ablate): + """Extract the next batch member from activations, and repeat it + num_channels_to_ablate times.""" + if isinstance(activations, torch.Tensor): + return super().set_next_batch(input_batch_index, activations, + num_channels_to_ablate) + + self.activations = [] + for activation in activations: + activation = activation[ + input_batch_index, :, :, :].clone().unsqueeze(0) + self.activations.append( + activation.repeat(num_channels_to_ablate, 1, 1, 1)) + + def __call__(self, x): + """Go over the activation indices to be ablated, stored in + self.indices.""" + result = self.activations + + if isinstance(result, torch.Tensor): + return super().__call__(x) + + channel_cumsum = np.cumsum([r.shape[1] for r in result]) + num_channels_to_ablate = result[0].size(0) # batch + for i in range(num_channels_to_ablate): + pyramid_layer = bisect.bisect_right(channel_cumsum, + self.indices[i]) + if pyramid_layer > 0: + index_in_pyramid_layer = self.indices[i] - channel_cumsum[ + pyramid_layer - 1] + else: + index_in_pyramid_layer = self.indices[i] + result[pyramid_layer][i, index_in_pyramid_layer, :, :] = -1000 + return result + + +class DetBoxScoreTarget: + """Det Score calculation class. + + In the case of the grad-free method, the calculation method is that + for every original detected bounding box specified in "bboxes", + assign a score on how the current bounding boxes match it, + + 1. In Bbox IoU + 2. In the classification score. + 3. In Mask IoU if ``segms`` exist. + + If there is not a large enough overlap, or the category changed, + assign a score of 0. The total score is the sum of all the box scores. + + In the case of the grad-based method, the calculation method is + the sum of losses after excluding a specific key. + """ + + def __init__(self, + pred_instance: InstanceData, + match_iou_thr: float = 0.5, + device: str = 'cuda:0', + ignore_loss_params: Optional[List] = None): + self.focal_bboxes = pred_instance.bboxes + self.focal_labels = pred_instance.labels + self.match_iou_thr = match_iou_thr + self.device = device + self.ignore_loss_params = ignore_loss_params + if ignore_loss_params is not None: + assert isinstance(self.ignore_loss_params, list) + + def __call__(self, results): + output = torch.tensor([0.], device=self.device) + + if 'loss_cls' in results: + # grad-based method + # results is dict + for loss_key, loss_value in results.items(): + if 'loss' not in loss_key or \ + loss_key in self.ignore_loss_params: + continue + if isinstance(loss_value, list): + output += sum(loss_value) + else: + output += loss_value + return output + else: + # grad-free method + # results is DetDataSample + pred_instances = results.pred_instances + if len(pred_instances) == 0: + return output + + pred_bboxes = pred_instances.bboxes + pred_scores = pred_instances.scores + pred_labels = pred_instances.labels + + for focal_box, focal_label in zip(self.focal_bboxes, + self.focal_labels): + ious = torchvision.ops.box_iou(focal_box[None], + pred_bboxes[..., :4]) + index = ious.argmax() + if ious[0, index] > self.match_iou_thr and pred_labels[ + index] == focal_label: + # TODO: Adaptive adjustment of weights based on algorithms + score = ious[0, index] + pred_scores[index] + output = output + score + return output + + +class SpatialBaseCAM(BaseCAM): + """CAM that maintains spatial information. 
+ + Gradients are often averaged over the spatial dimension in CAM + visualization for classification, but this is unreasonable in detection + tasks. There is no need to average the gradients in the detection task. + """ + + def get_cam_image(self, + input_tensor: torch.Tensor, + target_layer: torch.nn.Module, + targets: List[torch.nn.Module], + activations: torch.Tensor, + grads: torch.Tensor, + eigen_smooth: bool = False) -> np.ndarray: + + weights = self.get_cam_weights(input_tensor, target_layer, targets, + activations, grads) + weighted_activations = weights * activations + if eigen_smooth: + cam = get_2d_projection(weighted_activations) + else: + cam = weighted_activations.sum(axis=1) + return cam + + +class GradCAM(SpatialBaseCAM, Base_GradCAM): + """Gradients are no longer averaged over the spatial dimension.""" + + def get_cam_weights(self, input_tensor, target_layer, target_category, + activations, grads): + return grads + + +class GradCAMPlusPlus(SpatialBaseCAM, Base_GradCAMPlusPlus): + """Gradients are no longer averaged over the spatial dimension.""" + + def get_cam_weights(self, input_tensor, target_layers, target_category, + activations, grads): + grads_power_2 = grads**2 + grads_power_3 = grads_power_2 * grads + # Equation 19 in https://arxiv.org/abs/1710.11063 + sum_activations = np.sum(activations, axis=(2, 3)) + eps = 0.000001 + aij = grads_power_2 / ( + 2 * grads_power_2 + + sum_activations[:, :, None, None] * grads_power_3 + eps) + # Now bring back the ReLU from eq.7 in the paper, + # And zero out aijs where the activations are 0 + aij = np.where(grads != 0, aij, 0) + + weights = np.maximum(grads, 0) * aij + return weights diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/collect_env.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..89bad658cb7d4f1b602690d8d888a309166283ee --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/collect_env.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import mmdet +from mmengine.utils import get_git_hash +from mmengine.utils.dl_utils import collect_env as collect_base_env + +import mmyolo + + +def collect_env() -> dict: + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMCV'] = mmcv.__version__ + env_info['MMDetection'] = mmdet.__version__ + env_info['MMYOLO'] = mmyolo.__version__ + '+' + get_git_hash()[:7] + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/labelme_utils.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/labelme_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0981919771a617ca79b29c3ddf96ea14c82fccc6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/labelme_utils.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path + +from mmengine.structures import InstanceData + + +class LabelmeFormat: + """Predict results save into labelme file. + + Base on https://github.com/wkentaro/labelme/blob/main/labelme/label_file.py + + Args: + classes (tuple): Model classes name. 
+ """ + + def __init__(self, classes: tuple): + super().__init__() + self.classes = classes + + def __call__(self, pred_instances: InstanceData, metainfo: dict, + output_path: str, selected_classes: list): + """Get image data field for labelme. + + Args: + pred_instances (InstanceData): Candidate prediction info. + metainfo (dict): Meta info of prediction. + output_path (str): Image file path. + selected_classes (list): Selected class name. + + Labelme file eg. + { + "version": "5.1.1", + "flags": {}, + "imagePath": "/data/cat/1.jpg", + "imageData": null, + "imageHeight": 3000, + "imageWidth": 4000, + "shapes": [ + { + "label": "cat", + "points": [ + [ + 1148.076923076923, + 1188.4615384615383 + ], + [ + 2471.1538461538457, + 2176.923076923077 + ] + ], + "group_id": null, + "shape_type": "rectangle", + "flags": {} + }, + {...} + ] + } + """ + + image_path = os.path.abspath(metainfo['img_path']) + + json_info = { + 'version': '5.1.1', + 'flags': {}, + 'imagePath': image_path, + 'imageData': None, + 'imageHeight': metainfo['ori_shape'][0], + 'imageWidth': metainfo['ori_shape'][1], + 'shapes': [] + } + + for pred_instance in pred_instances: + pred_bbox = pred_instance.bboxes.cpu().numpy().tolist()[0] + pred_label = self.classes[pred_instance.labels] + + if selected_classes is not None and \ + pred_label not in selected_classes: + # filter class name + continue + + sub_dict = { + 'label': pred_label, + 'points': [pred_bbox[:2], pred_bbox[2:]], + 'group_id': None, + 'shape_type': 'rectangle', + 'flags': {} + } + json_info['shapes'].append(sub_dict) + + with open(output_path, 'w', encoding='utf-8') as f_json: + json.dump(json_info, f_json, ensure_ascii=False, indent=2) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/large_image.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/large_image.py new file mode 100644 index 0000000000000000000000000000000000000000..8670804684f6dcdc6dc1846cf85260d900b3474e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/large_image.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence, Tuple + +import torch +from mmcv.ops import batched_nms +from mmdet.structures import DetDataSample, SampleList +from mmengine.structures import InstanceData + + +def shift_rbboxes(bboxes: torch.Tensor, offset: Sequence[int]): + """Shift rotated bboxes with offset. + + Args: + bboxes (Tensor): The rotated bboxes need to be translated. + With shape (n, 5), which means (x, y, w, h, a). + offset (Sequence[int]): The translation offsets with shape of (2, ). + Returns: + Tensor: Shifted rotated bboxes. + """ + offset_tensor = bboxes.new_tensor(offset) + shifted_bboxes = bboxes.clone() + shifted_bboxes[:, 0:2] = shifted_bboxes[:, 0:2] + offset_tensor + return shifted_bboxes + + +def shift_predictions(det_data_samples: SampleList, + offsets: Sequence[Tuple[int, int]], + src_image_shape: Tuple[int, int]) -> SampleList: + """Shift predictions to the original image. + + Args: + det_data_samples (List[:obj:`DetDataSample`]): A list of patch results. + offsets (Sequence[Tuple[int, int]]): Positions of the left top points + of patches. + src_image_shape (Tuple[int, int]): A (height, width) tuple of the large + image's width and height. + Returns: + (List[:obj:`DetDataSample`]): shifted results. 
+ """ + try: + from sahi.slicing import shift_bboxes, shift_masks + except ImportError: + raise ImportError('Please run "pip install -U sahi" ' + 'to install sahi first for large image inference.') + + assert len(det_data_samples) == len( + offsets), 'The `results` should has the ' 'same length with `offsets`.' + shifted_predictions = [] + for det_data_sample, offset in zip(det_data_samples, offsets): + pred_inst = det_data_sample.pred_instances.clone() + + # Check bbox type + if pred_inst.bboxes.size(-1) == 4: + # Horizontal bboxes + shifted_bboxes = shift_bboxes(pred_inst.bboxes, offset) + elif pred_inst.bboxes.size(-1) == 5: + # Rotated bboxes + shifted_bboxes = shift_rbboxes(pred_inst.bboxes, offset) + else: + raise NotImplementedError + + # shift bboxes and masks + pred_inst.bboxes = shifted_bboxes + if 'masks' in det_data_sample: + pred_inst.masks = shift_masks(pred_inst.masks, offset, + src_image_shape) + + shifted_predictions.append(pred_inst.clone()) + + shifted_predictions = InstanceData.cat(shifted_predictions) + + return shifted_predictions + + +def merge_results_by_nms(results: SampleList, offsets: Sequence[Tuple[int, + int]], + src_image_shape: Tuple[int, int], + nms_cfg: dict) -> DetDataSample: + """Merge patch results by nms. + + Args: + results (List[:obj:`DetDataSample`]): A list of patch results. + offsets (Sequence[Tuple[int, int]]): Positions of the left top points + of patches. + src_image_shape (Tuple[int, int]): A (height, width) tuple of the large + image's width and height. + nms_cfg (dict): it should specify nms type and other parameters + like `iou_threshold`. + Returns: + :obj:`DetDataSample`: merged results. + """ + shifted_instances = shift_predictions(results, offsets, src_image_shape) + + _, keeps = batched_nms( + boxes=shifted_instances.bboxes, + scores=shifted_instances.scores, + idxs=shifted_instances.labels, + nms_cfg=nms_cfg) + merged_instances = shifted_instances[keeps] + + merged_result = results[0].clone() + merged_result.pred_instances = merged_instances + return merged_result diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/misc.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d366d75d4821753354c54629d2929661993578 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/misc.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import urllib + +import numpy as np +import torch +from mmengine.utils import scandir +from prettytable import PrettyTable + +from mmyolo.models import RepVGGBlock + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', + '.tiff', '.webp') + + +def switch_to_deploy(model): + """Model switch to deploy status.""" + for layer in model.modules(): + if isinstance(layer, RepVGGBlock): + layer.switch_to_deploy() + + print('Switch model to deploy modality.') + + +def auto_arrange_images(image_list: list, image_column: int = 2) -> np.ndarray: + """Auto arrange image to image_column x N row. + + Args: + image_list (list): cv2 image list. + image_column (int): Arrange to N column. Default: 2. 
+ Return: + (np.ndarray): image_column x N row merge image + """ + img_count = len(image_list) + if img_count <= image_column: + # no need to arrange + image_show = np.concatenate(image_list, axis=1) + else: + # arrange image according to image_column + image_row = round(img_count / image_column) + fill_img_list = [np.ones(image_list[0].shape, dtype=np.uint8) * 255 + ] * ( + image_row * image_column - img_count) + image_list.extend(fill_img_list) + merge_imgs_col = [] + for i in range(image_row): + start_col = image_column * i + end_col = image_column * (i + 1) + merge_col = np.hstack(image_list[start_col:end_col]) + merge_imgs_col.append(merge_col) + + # merge to one image + image_show = np.vstack(merge_imgs_col) + + return image_show + + +def get_file_list(source_root: str) -> [list, dict]: + """Get file list. + + Args: + source_root (str): image or video source path + + Return: + source_file_path_list (list): A list for all source file. + source_type (dict): Source type: file or url or dir. + """ + is_dir = os.path.isdir(source_root) + is_url = source_root.startswith(('http:/', 'https:/')) + is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS + + source_file_path_list = [] + if is_dir: + # when input source is dir + for file in scandir( + source_root, IMG_EXTENSIONS, recursive=True, + case_sensitive=False): + source_file_path_list.append(os.path.join(source_root, file)) + elif is_url: + # when input source is url + filename = os.path.basename( + urllib.parse.unquote(source_root).split('?')[0]) + file_save_path = os.path.join(os.getcwd(), filename) + print(f'Downloading source file to {file_save_path}') + torch.hub.download_url_to_file(source_root, file_save_path) + source_file_path_list = [file_save_path] + elif is_file: + # when input source is single image + source_file_path_list = [source_root] + else: + print('Cannot find image file.') + + source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file) + + return source_file_path_list, source_type + + +def show_data_classes(data_classes): + """When printing an error, all class names of the dataset.""" + print('\n\nThe name of the class contained in the dataset:') + data_classes_info = PrettyTable() + data_classes_info.title = 'Information of dataset class' + # List Print Settings + # If the quantity is too large, 25 rows will be displayed in each column + if len(data_classes) < 25: + data_classes_info.add_column('Class name', data_classes) + elif len(data_classes) % 25 != 0 and len(data_classes) > 25: + col_num = int(len(data_classes) / 25) + 1 + data_name_list = list(data_classes) + for i in range(0, (col_num * 25) - len(data_classes)): + data_name_list.append('') + for i in range(0, len(data_name_list), 25): + data_classes_info.add_column('Class name', + data_name_list[i:i + 25]) + + # Align display data to the left + data_classes_info.align['Class name'] = 'l' + print(data_classes_info) + + +def is_metainfo_lower(cfg): + """Determine whether the custom metainfo fields are all lowercase.""" + + def judge_keys(dataloader_cfg): + while 'dataset' in dataloader_cfg: + dataloader_cfg = dataloader_cfg['dataset'] + if 'metainfo' in dataloader_cfg: + all_keys = dataloader_cfg['metainfo'].keys() + all_is_lower = all([str(k).islower() for k in all_keys]) + assert all_is_lower, f'The keys in dataset metainfo must be all lowercase, but got {all_keys}. 
' \ + f'Please refer to https://github.com/open-mmlab/mmyolo/blob/e62c8c4593/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py#L8' # noqa + + judge_keys(cfg.get('train_dataloader', {})) + judge_keys(cfg.get('val_dataloader', {})) + judge_keys(cfg.get('test_dataloader', {})) diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/utils/setup_env.py b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/setup_env.py new file mode 100644 index 0000000000000000000000000000000000000000..f51ed928cbddb98c2274e09b5acea1d70dfd1abd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/utils/setup_env.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import warnings + +from mmengine import DefaultScope + + +def register_all_modules(init_default_scope: bool = True): + """Register all modules in mmdet into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmdet default scope. + When `init_default_scope=True`, the global default scope will be + set to `mmyolo`, and all registries will build modules from mmdet's + registry node. To understand more about the registry, please refer + to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa + import mmdet.engine # noqa: F401,F403 + import mmdet.visualization # noqa: F401,F403 + + import mmyolo.datasets # noqa: F401,F403 + import mmyolo.engine # noqa: F401,F403 + import mmyolo.models # noqa: F401,F403 + + if init_default_scope: + never_created = DefaultScope.get_current_instance() is None \ + or not DefaultScope.check_instance_created('mmyolo') + if never_created: + DefaultScope.get_instance('mmyolo', scope_name='mmyolo') + return + current_scope = DefaultScope.get_current_instance() + if current_scope.scope_name != 'mmyolo': + warnings.warn('The current default scope ' + f'"{current_scope.scope_name}" is not "mmyolo", ' + '`register_all_modules` will force the current' + 'default scope to be "mmyolo". If this is not ' + 'expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmyolo-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmyolo') diff --git a/models/YOLO-World/third_party/mmyolo/mmyolo/version.py b/models/YOLO-World/third_party/mmyolo/mmyolo/version.py new file mode 100644 index 0000000000000000000000000000000000000000..6e4f0e8e3747eeb71d72d53d0e2daf6ea203c596 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/mmyolo/version.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+
+__version__ = '0.6.0'
+
+from typing import Tuple
+
+short_version = __version__
+
+
+def parse_version_info(version_str: str) -> Tuple:
+    """Parse version info of MMYOLO."""
+    version_info = []
+    for x in version_str.split('.'):
+        if x.isdigit():
+            version_info.append(int(x))
+        elif x.find('rc') != -1:
+            patch_version = x.split('rc')
+            version_info.append(int(patch_version[0]))
+            version_info.append(f'rc{patch_version[1]}')
+    return tuple(version_info)
+
+
+version_info = parse_version_info(__version__)
diff --git a/models/YOLO-World/third_party/mmyolo/model-index.yml b/models/YOLO-World/third_party/mmyolo/model-index.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9aa0288cc4b79f5b367c159ad9b29ccd62a0b74c
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/model-index.yml
@@ -0,0 +1,8 @@
+Import:
+  - configs/yolov5/metafile.yml
+  - configs/yolov6/metafile.yml
+  - configs/yolox/metafile.yml
+  - configs/rtmdet/metafile.yml
+  - configs/yolov7/metafile.yml
+  - configs/ppyoloe/metafile.yml
+  - configs/yolov8/metafile.yml
diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/README.md b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..918589f228af70f3338b2e6ea065ea72f245ebc1
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/README.md
@@ -0,0 +1,43 @@
+# MMYOLO Model Assigner Visualization
+
+
+
+## Introduction
+
+This project is developed for easily visualizing label assignment results. The script allows users to analyze where and how many positive samples each GT box is assigned in the image.
+
+Now, the script supports `YOLOv5`, `YOLOv7`, `YOLOv8` and `RTMDet`.
+
+## Usage
+
+### Command
+
+YOLOv5 assigner visualization command:
+
+```shell
+python projects/assigner_visualization/assigner_visualization.py projects/assigner_visualization/configs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py
+```
+
+Note: `YOLOv5` does not need to load the trained weights.
+
+YOLOv7 assigner visualization command:
+
+```shell
+python projects/assigner_visualization/assigner_visualization.py projects/assigner_visualization/configs/yolov7_tiny_syncbn_fast_8xb16-300e_coco_assignervisualization.py -c ${checkpoint}
+```
+
+YOLOv8 assigner visualization command:
+
+```shell
+python projects/assigner_visualization/assigner_visualization.py projects/assigner_visualization/configs/yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py -c ${checkpoint}
+```
+
+RTMDet assigner visualization command:
+
+```shell
+python projects/assigner_visualization/assigner_visualization.py projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py -c ${checkpoint}
+```
+
+${checkpoint} is the checkpoint file path. Dynamic label assignment is used in `YOLOv7`, `YOLOv8` and `RTMDet`, so the model weights affect the positive sample allocation results; it is therefore recommended to load the trained model weights for these models.
+
+If you want to know more details about label assignment, you can check the [RTMDet description](https://mmyolo.readthedocs.io/zh_CN/latest/algorithm_descriptions/rtmdet_description.html#id5).
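+The script also accepts several optional flags (defined in `assigner_visualization.py`), such as `--show-number`/`-n` to limit how many images are visualized, `--output-dir` to change the save folder, `--show-prior` to draw the matched priors, and `--not-show-label` to hide GT labels. The command below is only an illustrative sketch that combines these flags; the chosen values (and the output folder name) are arbitrary examples, and `${checkpoint}` is a placeholder as above:
+
+```shell
+python projects/assigner_visualization/assigner_visualization.py projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py -c ${checkpoint} --show-number 20 --output-dir my_assigned_results --show-prior
+```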
diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/assigner_visualization.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/assigner_visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..e290d26b6d6fbb2f703faf3ebcd0474da871aea8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/assigner_visualization.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +import sys +import warnings + +import mmcv +import numpy as np +import torch +from mmengine import ProgressBar +from mmengine.config import Config, DictAction +from mmengine.dataset import COLLATE_FUNCTIONS +from mmengine.runner.checkpoint import load_checkpoint +from numpy import random + +from mmyolo.registry import DATASETS, MODELS +from mmyolo.utils import register_all_modules +from projects.assigner_visualization.dense_heads import (RTMHeadAssigner, + YOLOv5HeadAssigner, + YOLOv7HeadAssigner, + YOLOv8HeadAssigner) +from projects.assigner_visualization.visualization import \ + YOLOAssignerVisualizer + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMYOLO show the positive sample assigning' + ' results.') + parser.add_argument('config', help='config file path') + parser.add_argument('--checkpoint', '-c', type=str, help='checkpoint file') + parser.add_argument( + '--show-number', + '-n', + type=int, + default=sys.maxsize, + help='number of images selected to save, ' + 'must bigger than 0. if the number is bigger than length ' + 'of dataset, show all the images in dataset; ' + 'default "sys.maxsize", show all images in dataset') + parser.add_argument( + '--output-dir', + default='assigned_results', + type=str, + help='The name of the folder where the image is saved.') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference.') + parser.add_argument( + '--show-prior', + default=False, + action='store_true', + help='Whether to show prior on image.') + parser.add_argument( + '--not-show-label', + default=False, + action='store_true', + help='Whether to show label on image.') + parser.add_argument('--seed', default=-1, type=int, help='random seed') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + register_all_modules() + + # set random seed + seed = int(args.seed) + if seed != -1: + print(f'Set the global seed: {seed}') + random.seed(int(args.seed)) + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build model + model = MODELS.build(cfg.model) + if args.checkpoint is not None: + load_checkpoint(model, args.checkpoint) + elif isinstance(model.bbox_head, (YOLOv7HeadAssigner, RTMHeadAssigner)): + warnings.warn( + 'if you use dynamic_assignment methods such as YOLOv7 or ' + 'YOLOv8 or RTMDet assigner, please load the checkpoint.') + assert isinstance(model.bbox_head, (YOLOv5HeadAssigner, + YOLOv7HeadAssigner, + YOLOv8HeadAssigner, + RTMHeadAssigner)), \ + 'Now, this script only support YOLOv5, YOLOv7, YOLOv8 and RTMdet, ' \ + 'and bbox_head must use ' \ + '`YOLOv5HeadAssigner or YOLOv7HeadAssigne or YOLOv8HeadAssigner ' \ + 'or RTMHeadAssigner`. Please use `' \ + 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py' \ + 'or yolov7_tiny_syncbn_fast_8x16b-300e_coco_assignervisualization.py' \ + 'or yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py' \ + 'or rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py' \ + """` as config file.""" + model.eval() + model.to(args.device) + + # build dataset + dataset_cfg = cfg.get('train_dataloader').get('dataset') + dataset = DATASETS.build(dataset_cfg) + + # get collate_fn + collate_fn_cfg = cfg.get('train_dataloader').pop( + 'collate_fn', dict(type='pseudo_collate')) + collate_fn_type = collate_fn_cfg.pop('type') + collate_fn = COLLATE_FUNCTIONS.get(collate_fn_type) + + # init visualizer + visualizer = YOLOAssignerVisualizer( + vis_backends=[{ + 'type': 'LocalVisBackend' + }], name='visualizer') + visualizer.dataset_meta = dataset.metainfo + # need priors size to draw priors + + if hasattr(model.bbox_head.prior_generator, 'base_anchors'): + visualizer.priors_size = model.bbox_head.prior_generator.base_anchors + + # make output dir + os.makedirs(args.output_dir, exist_ok=True) + print('Results will save to ', args.output_dir) + + # init visualization image number + assert args.show_number > 0 + display_number = min(args.show_number, len(dataset)) + + progress_bar = ProgressBar(display_number) + for ind_img in range(display_number): + data = dataset.prepare_data(ind_img) + if data is None: + print('Unable to visualize {} due to strong data augmentations'. 
+ format(dataset[ind_img]['data_samples'].img_path)) + continue + # convert data to batch format + batch_data = collate_fn([data]) + with torch.no_grad(): + assign_results = model.assign(batch_data) + + img = data['inputs'].cpu().numpy().astype(np.uint8).transpose( + (1, 2, 0)) + # bgr2rgb + img = mmcv.bgr2rgb(img) + + gt_instances = data['data_samples'].gt_instances + + img_show = visualizer.draw_assign(img, assign_results, gt_instances, + args.show_prior, args.not_show_label) + + if hasattr(data['data_samples'], 'img_path'): + filename = osp.basename(data['data_samples'].img_path) + else: + # some dataset have not image path + filename = f'{ind_img}.jpg' + out_file = osp.join(args.output_dir, filename) + + # convert rgb 2 bgr and save img + mmcv.imwrite(mmcv.rgb2bgr(img_show), out_file) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py new file mode 100644 index 0000000000000000000000000000000000000000..006502eb45af9ece927b68359525cc6c2de30788 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py @@ -0,0 +1,9 @@ +_base_ = ['../../../configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py'] + +custom_imports = dict(imports=[ + 'projects.assigner_visualization.detectors', + 'projects.assigner_visualization.dense_heads' +]) + +model = dict( + type='YOLODetectorAssigner', bbox_head=dict(type='RTMHeadAssigner')) diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py new file mode 100644 index 0000000000000000000000000000000000000000..1db799b5142375c86bd5a018764017c9d3170a07 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py @@ -0,0 +1,11 @@ +_base_ = [ + '../../../configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' +] + +custom_imports = dict(imports=[ + 'projects.assigner_visualization.detectors', + 'projects.assigner_visualization.dense_heads' +]) + +model = dict( + type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv5HeadAssigner')) diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov7_tiny_syncbn_fast_8xb16-300e_coco_assignervisualization.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov7_tiny_syncbn_fast_8xb16-300e_coco_assignervisualization.py new file mode 100644 index 0000000000000000000000000000000000000000..626dc18b59df3b9ced0781347989b65f64de5042 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov7_tiny_syncbn_fast_8xb16-300e_coco_assignervisualization.py @@ -0,0 +1,9 @@ +_base_ = ['../../../configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py'] + +custom_imports = dict(imports=[ + 'projects.assigner_visualization.detectors', + 'projects.assigner_visualization.dense_heads' +]) + +model = dict( + type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv7HeadAssigner')) diff --git 
a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py new file mode 100644 index 0000000000000000000000000000000000000000..03dcae8c39a09c0200dc52123efc1bc0a348dea3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/configs/yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py @@ -0,0 +1,9 @@ +_base_ = ['../../../configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py'] + +custom_imports = dict(imports=[ + 'projects.assigner_visualization.detectors', + 'projects.assigner_visualization.dense_heads' +]) + +model = dict( + type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv8HeadAssigner')) diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82adaaba8ebe3510895ebc3d5ed5ac7c573b41b2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .rtmdet_head_assigner import RTMHeadAssigner +from .yolov5_head_assigner import YOLOv5HeadAssigner +from .yolov7_head_assigner import YOLOv7HeadAssigner +from .yolov8_head_assigner import YOLOv8HeadAssigner + +__all__ = [ + 'YOLOv5HeadAssigner', 'YOLOv7HeadAssigner', 'YOLOv8HeadAssigner', + 'RTMHeadAssigner' +] diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/rtmdet_head_assigner.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/rtmdet_head_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..d3ae1c86d054d02a7a8537ee91251c0cca39edc6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/rtmdet_head_assigner.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Union + +import torch +from mmdet.structures.bbox import distance2bbox +from mmdet.utils import InstanceList +from torch import Tensor + +from mmyolo.models import RTMDetHead +from mmyolo.models.utils import gt_instances_preprocess +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class RTMHeadAssigner(RTMDetHead): + + def assign_by_gt_and_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + inputs_hw: Union[Tensor, tuple] = (640, 640) + ) -> dict: + """Calculate the assigning results based on the gt and features + extracted by the detection head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Decoded box for each scale + level with shape (N, num_anchors * 4, H, W) in + [tl_x, tl_y, br_x, br_y] format. + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + inputs_hw (Union[Tensor, tuple]): Height and width of inputs size. + Returns: + dict[str, Tensor]: A dictionary of assigning results. 
+ """ + num_imgs = len(batch_img_metas) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.prior_generator.num_levels + # rtmdet's prior offset differs from others + prior_offset = self.prior_generator.offset + + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + device = cls_scores[0].device + + # If the shape does not equal, generate new one + if featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = featmap_sizes + mlvl_priors_with_stride = self.prior_generator.grid_priors( + featmap_sizes, device=device, with_stride=True) + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + + flatten_cls_scores = torch.cat([ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.cls_out_channels) + for cls_score in cls_scores + ], 1).contiguous() + + flatten_bboxes = torch.cat([ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ], 1) + flatten_bboxes = flatten_bboxes * self.flatten_priors_train[..., -1, + None] + flatten_bboxes = distance2bbox(self.flatten_priors_train[..., :2], + flatten_bboxes) + + assigned_result = self.assigner(flatten_bboxes.detach(), + flatten_cls_scores.detach(), + self.flatten_priors_train, gt_labels, + gt_bboxes, pad_bbox_flag) + + labels = assigned_result['assigned_labels'].reshape(-1) + bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4) + + # FG cat_id: [0, num_classes -1], BG cat_id: num_classes + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero().squeeze(1) + targets = bbox_targets[pos_inds] + gt_bboxes = gt_bboxes.squeeze(0) + matched_gt_inds = torch.tensor( + [((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0] + for t in targets], + device=device) + + level_inds = torch.zeros_like(labels) + img_inds = torch.zeros_like(labels) + level_nums = [0] + [f[0] * f[1] for f in featmap_sizes] + for i in range(len(level_nums) - 1): + level_nums[i + 1] = level_nums[i] + level_nums[i + 1] + level_inds[level_nums[i]:level_nums[i + 1]] = i + level_inds_pos = level_inds[pos_inds] + + img_inds = img_inds[pos_inds] + labels = labels[pos_inds] + + inputs_hw = batch_img_metas[0]['batch_input_shape'] + assign_results = [] + for i in range(self.num_levels): + retained_inds = level_inds_pos == i + if not retained_inds.any(): + assign_results_prior = { + 'stride': + self.featmap_strides[i], + 'grid_x_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'grid_y_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'img_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'class_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'retained_gt_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'prior_ind': + 0, + 'offset': + prior_offset + } + else: + w = inputs_hw[1] // self.featmap_strides[i] + + retained_pos_inds = pos_inds[retained_inds] - level_nums[i] + grid_y_inds = retained_pos_inds // w + grid_x_inds = retained_pos_inds - retained_pos_inds // w * w + assign_results_prior = { + 'stride': self.featmap_strides[i], + 'grid_x_inds': grid_x_inds, + 'grid_y_inds': grid_y_inds, + 'img_inds': img_inds[retained_inds], + 'class_inds': labels[retained_inds], + 'retained_gt_inds': matched_gt_inds[retained_inds], + 'prior_ind': 0, + 'offset': prior_offset + } + assign_results.append([assign_results_prior]) + return 
assign_results + + def assign(self, batch_data_samples: Union[list, dict], + inputs_hw: Union[tuple, torch.Size]) -> dict: + """Calculate assigning results. This function is provided to the + `assigner_visualization.py` script. + + Args: + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + inputs_hw: Height and width of inputs size + + Returns: + dict: A dictionary of assigning components. + """ + if isinstance(batch_data_samples, list): + raise NotImplementedError( + 'assigning results_list is not implemented') + else: + # Fast version + cls_scores, bbox_preds = self(batch_data_samples['feats']) + assign_inputs = (cls_scores, bbox_preds, + batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas'], inputs_hw) + assign_results = self.assign_by_gt_and_feat(*assign_inputs) + return assign_results diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov5_head_assigner.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov5_head_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..599963fede32fc02c73db8c744dfbc2946dd53fb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov5_head_assigner.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence, Union + +import torch +from mmdet.models.utils import unpack_gt_instances +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.models import YOLOv5Head +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class YOLOv5HeadAssigner(YOLOv5Head): + + def assign_by_gt_and_feat( + self, + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + inputs_hw: Union[Tensor, tuple] = (640, 640) + ) -> dict: + """Calculate the assigning results based on the gt and features + extracted by the detection head. + + Args: + batch_gt_instances (Sequence[InstanceData]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (Sequence[dict]): Meta information of each image, + e.g., image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + inputs_hw (Union[Tensor, tuple]): Height and width of inputs size. + Returns: + dict[str, Tensor]: A dictionary of assigning results. + """ + # 1. 
Convert gt to norm format + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + device = batch_targets_normed.device + scaled_factor = torch.ones(7, device=device) + gt_inds = torch.arange( + batch_targets_normed.shape[1], + dtype=torch.long, + device=device, + requires_grad=False).unsqueeze(0).repeat((self.num_base_priors, 1)) + + assign_results = [] + for i in range(self.num_levels): + assign_results_feat = [] + h = inputs_hw[0] // self.featmap_strides[i] + w = inputs_hw[1] // self.featmap_strides[i] + + # empty gt bboxes + if batch_targets_normed.shape[1] == 0: + for k in range(self.num_base_priors): + assign_results_feat.append({ + 'stride': + self.featmap_strides[i], + 'grid_x_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'grid_y_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'img_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'class_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'retained_gt_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'prior_ind': + k + }) + assign_results.append(assign_results_feat) + continue + + priors_base_sizes_i = self.priors_base_sizes[i] + # feature map scale whwh + scaled_factor[2:6] = torch.tensor([w, h, w, h]) + # Scale batch_targets from range 0-1 to range 0-features_maps size. + # (num_base_priors, num_bboxes, 7) + batch_targets_scaled = batch_targets_normed * scaled_factor + + # 2. Shape match + wh_ratio = batch_targets_scaled[..., + 4:6] / priors_base_sizes_i[:, None] + match_inds = torch.max( + wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr + batch_targets_scaled = batch_targets_scaled[match_inds] + match_gt_inds = gt_inds[match_inds] + + # no gt bbox matches anchor + if batch_targets_scaled.shape[0] == 0: + for k in range(self.num_base_priors): + assign_results_feat.append({ + 'stride': + self.featmap_strides[i], + 'grid_x_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'grid_y_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'img_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'class_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'retained_gt_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'prior_ind': + k + }) + assign_results.append(assign_results_feat) + continue + + # 3. Positive samples with additional neighbors + + # check the left, up, right, bottom sides of the + # targets grid, and determine whether assigned + # them as positive samples as well. 
+ batch_targets_cxcy = batch_targets_scaled[:, 2:4] + grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy + left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) & + (batch_targets_cxcy > 1)).T + right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) & + (grid_xy > 1)).T + offset_inds = torch.stack( + (torch.ones_like(left), left, up, right, bottom)) + + batch_targets_scaled = batch_targets_scaled.repeat( + (5, 1, 1))[offset_inds] + retained_gt_inds = match_gt_inds.repeat((5, 1))[offset_inds] + retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1], + 1)[offset_inds] + + # prepare pred results and positive sample indexes to + # calculate class loss and bbox lo + _chunk_targets = batch_targets_scaled.chunk(4, 1) + img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets + priors_inds, (img_inds, class_inds) = priors_inds.long().view( + -1), img_class_inds.long().T + + grid_xy_long = (grid_xy - + retained_offsets * self.near_neighbor_thr).long() + grid_x_inds, grid_y_inds = grid_xy_long.T + for k in range(self.num_base_priors): + retained_inds = priors_inds == k + assign_results_prior = { + 'stride': self.featmap_strides[i], + 'grid_x_inds': grid_x_inds[retained_inds], + 'grid_y_inds': grid_y_inds[retained_inds], + 'img_inds': img_inds[retained_inds], + 'class_inds': class_inds[retained_inds], + 'retained_gt_inds': retained_gt_inds[retained_inds], + 'prior_ind': k + } + assign_results_feat.append(assign_results_prior) + assign_results.append(assign_results_feat) + return assign_results + + def assign(self, batch_data_samples: Union[list, dict], + inputs_hw: Union[tuple, torch.Size]) -> dict: + """Calculate assigning results. This function is provided to the + `assigner_visualization.py` script. + + Args: + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + inputs_hw: Height and width of inputs size + + Returns: + dict: A dictionary of assigning components. + """ + if isinstance(batch_data_samples, list): + outputs = unpack_gt_instances(batch_data_samples) + (batch_gt_instances, batch_gt_instances_ignore, + batch_img_metas) = outputs + + assign_inputs = (batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore, inputs_hw) + else: + # Fast version + assign_inputs = (batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas'], inputs_hw) + assign_results = self.assign_by_gt_and_feat(*assign_inputs) + + return assign_results diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov7_head_assigner.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov7_head_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..de2a90e36b57f5ad54158ee546dac6cf513cd5a3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov7_head_assigner.py @@ -0,0 +1,159 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Union + +import torch +from mmdet.utils import InstanceList +from torch import Tensor + +from mmyolo.models import YOLOv7Head +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class YOLOv7HeadAssigner(YOLOv7Head): + + def assign_by_gt_and_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + inputs_hw: Union[Tensor, tuple], + ) -> dict: + """Calculate the assigning results based on the gt and features + extracted by the detection head. + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + objectnesses (Sequence[Tensor]): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W) + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + inputs_hw (Union[Tensor, tuple]): Height and width of inputs size. + Returns: + dict[str, Tensor]: A dictionary of assigning results. + """ + device = cls_scores[0][0].device + + head_preds = self._merge_predict_results(bbox_preds, objectnesses, + cls_scores) + + batch_targets_normed = self._convert_gt_to_norm_format( + batch_gt_instances, batch_img_metas) + + # yolov5_assign and simota_assign + assigner_results = self.assigner( + head_preds, + batch_targets_normed, + batch_img_metas[0]['batch_input_shape'], + self.priors_base_sizes, + self.grid_offset, + near_neighbor_thr=self.near_neighbor_thr) + + # multi-level positive sample position. + mlvl_positive_infos = assigner_results['mlvl_positive_infos'] + # assigned results with label and bboxes information. 
+ mlvl_targets_normed = assigner_results['mlvl_targets_normed'] + + assign_results = [] + for i in range(self.num_levels): + assign_results_feat = [] + # no gt bbox matches anchor + if mlvl_positive_infos[i].shape[0] == 0: + for k in range(self.num_base_priors): + assign_results_feat.append({ + 'stride': + self.featmap_strides[i], + 'grid_x_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'grid_y_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'img_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'class_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'retained_gt_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'prior_ind': + k + }) + assign_results.append(assign_results_feat) + continue + + # (batch_idx, prior_idx, x_scaled, y_scaled) + positive_info = mlvl_positive_infos[i] + targets_normed = mlvl_targets_normed[i] + priors_inds = positive_info[:, 1] + grid_x_inds = positive_info[:, 2] + grid_y_inds = positive_info[:, 3] + img_inds = targets_normed[:, 0] + class_inds = targets_normed[:, 1].long() + retained_gt_inds = self.get_gt_inds( + targets_normed, batch_targets_normed[0]).long() + for k in range(self.num_base_priors): + retained_inds = priors_inds == k + assign_results_prior = { + 'stride': self.featmap_strides[i], + 'grid_x_inds': grid_x_inds[retained_inds], + 'grid_y_inds': grid_y_inds[retained_inds], + 'img_inds': img_inds[retained_inds], + 'class_inds': class_inds[retained_inds], + 'retained_gt_inds': retained_gt_inds[retained_inds], + 'prior_ind': k + } + assign_results_feat.append(assign_results_prior) + assign_results.append(assign_results_feat) + return assign_results + + def get_gt_inds(self, assigned_target, gt_instance): + """Judging which one gt_ind is assigned by comparing assign_target and + origin target. + + Args: + assigned_target (Tensor(assign_nums,7)): YOLOv7 assigning results. + gt_instance (Tensor(gt_nums,7)): Normalized gt_instance, It + usually includes ``bboxes`` and ``labels`` attributes. + Returns: + gt_inds (Tensor): the index which one gt is assigned. + """ + gt_inds = torch.zeros(assigned_target.shape[0]) + for i in range(assigned_target.shape[0]): + gt_inds[i] = ((assigned_target[i] == gt_instance).sum( + dim=1) == 7).nonzero().squeeze() + return gt_inds + + def assign(self, batch_data_samples: Union[list, dict], + inputs_hw: Union[tuple, torch.Size]) -> dict: + """Calculate assigning results. + + This function is provided to the + `assigner_visualization.py` script. + Args: + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + inputs_hw: Height and width of inputs size + Returns: + dict: A dictionary of assigning components. 
+ """ + if isinstance(batch_data_samples, list): + raise NotImplementedError( + 'assigning results_list is not implemented') + else: + # Fast version + cls_scores, bbox_preds, objectnesses = self( + batch_data_samples['feats']) + assign_inputs = (cls_scores, bbox_preds, objectnesses, + batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas'], inputs_hw) + assign_results = self.assign_by_gt_and_feat(*assign_inputs) + return assign_results diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov8_head_assigner.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov8_head_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..49d254fdf5ae1e941b5c9b906223ec47311439c3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/dense_heads/yolov8_head_assigner.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Union + +import torch +from mmdet.utils import InstanceList +from torch import Tensor + +from mmyolo.models import YOLOv8Head +from mmyolo.models.utils import gt_instances_preprocess +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class YOLOv8HeadAssigner(YOLOv8Head): + + def assign_by_gt_and_feat( + self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + batch_gt_instances: InstanceList, + batch_img_metas: List[dict], + inputs_hw: Union[Tensor, tuple] = (640, 640) + ) -> dict: + """Calculate the assigning results based on the gt and features + extracted by the detection head. + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + bbox_dist_preds (Sequence[Tensor]): Box distribution logits for + each scale level with shape (bs, reg_max + 1, H*W, 4). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + inputs_hw (Union[Tensor, tuple]): Height and width of inputs size. + Returns: + dict[str, Tensor]: A dictionary of assigning results. 
+ """ + num_imgs = len(batch_img_metas) + device = cls_scores[0].device + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat( + mlvl_priors_with_stride, dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + # (bs, n, 4 * reg_max) + + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[..., 0]) + + assigned_result = self.assigner( + (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype), + flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train, + gt_labels, gt_bboxes, pad_bbox_flag) + + labels = assigned_result['assigned_labels'].reshape(-1) + bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4) + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'].squeeze(0) + + pos_inds = fg_mask_pre_prior.nonzero().squeeze(1) + + targets = bbox_targets[pos_inds] + gt_bboxes = gt_bboxes.squeeze(0) + matched_gt_inds = torch.tensor( + [((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0] + for t in targets], + device=device) + + level_inds = torch.zeros_like(labels) + img_inds = torch.zeros_like(labels) + level_nums = [0] + self.num_level_priors + for i in range(len(level_nums) - 1): + level_nums[i + 1] = level_nums[i] + level_nums[i + 1] + level_inds[level_nums[i]:level_nums[i + 1]] = i + level_inds_pos = level_inds[pos_inds] + + img_inds = img_inds[pos_inds] + labels = labels[pos_inds] + + assign_results = [] + for i in range(self.num_levels): + retained_inds = level_inds_pos == i + if not retained_inds.any(): + assign_results_prior = { + 'stride': + self.featmap_strides[i], + 'grid_x_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'grid_y_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'img_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'class_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'retained_gt_inds': + torch.zeros([0], dtype=torch.int64).to(device), + 'prior_ind': + 0 + } + else: + w = inputs_hw[1] // self.featmap_strides[i] + + retained_pos_inds = pos_inds[retained_inds] - level_nums[i] + grid_y_inds = retained_pos_inds // w + grid_x_inds = retained_pos_inds - retained_pos_inds // w * w + assign_results_prior = { + 'stride': self.featmap_strides[i], + 'grid_x_inds': grid_x_inds, + 'grid_y_inds': grid_y_inds, + 'img_inds': img_inds[retained_inds], + 'class_inds': labels[retained_inds], + 'retained_gt_inds': matched_gt_inds[retained_inds], + 'prior_ind': 0 + } + 
assign_results.append([assign_results_prior]) + return assign_results + + def assign(self, batch_data_samples: Union[list, dict], + inputs_hw: Union[tuple, torch.Size]) -> dict: + """Calculate assigning results. + + This function is provided to the + `assigner_visualization.py` script. + Args: + batch_data_samples (List[:obj:`DetDataSample`], dict): The Data + Samples. It usually includes information such as + `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. + inputs_hw: Height and width of inputs size + Returns: + dict: A dictionary of assigning components. + """ + if isinstance(batch_data_samples, list): + raise NotImplementedError( + 'assigning results_list is not implemented') + else: + # Fast version + cls_scores, bbox_preds = self(batch_data_samples['feats']) + assign_inputs = (cls_scores, bbox_preds, + batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas'], inputs_hw) + assign_results = self.assign_by_gt_and_feat(*assign_inputs) + return assign_results diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..155606a0136ef3e93d90347773af3eb7010b27ac --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from projects.assigner_visualization.detectors.yolo_detector_assigner import \ + YOLODetectorAssigner + +__all__ = ['YOLODetectorAssigner'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/yolo_detector_assigner.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/yolo_detector_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..5b723e01f65381155aaae962415d3c70040de06b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/detectors/yolo_detector_assigner.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +from mmyolo.models import YOLODetector +from mmyolo.registry import MODELS +from projects.assigner_visualization.dense_heads import (RTMHeadAssigner, + YOLOv7HeadAssigner, + YOLOv8HeadAssigner) + + +@MODELS.register_module() +class YOLODetectorAssigner(YOLODetector): + + def assign(self, data: dict) -> Union[dict, list]: + """Calculate assigning results from a batch of inputs and data + samples.This function is provided to the `assigner_visualization.py` + script. + + Args: + data (dict or tuple or list): Data sampled from dataset. + + Returns: + dict: A dictionary of assigning components. 
+ """ + assert isinstance(data, dict) + assert len(data['inputs']) == 1, 'Only support batchsize == 1' + data = self.data_preprocessor(data, True) + available_assigners = (YOLOv7HeadAssigner, YOLOv8HeadAssigner, + RTMHeadAssigner) + if isinstance(self.bbox_head, available_assigners): + data['data_samples']['feats'] = self.extract_feat(data['inputs']) + inputs_hw = data['inputs'].shape[-2:] + assign_results = self.bbox_head.assign(data['data_samples'], inputs_hw) + return assign_results diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..521a25b8837cf084e78fffa9f84660a4c9ae02bb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .assigner_visualizer import YOLOAssignerVisualizer + +__all__ = ['YOLOAssignerVisualizer'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/assigner_visualizer.py b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/assigner_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..fe1f4f0b90da2bbd683e3f9845efb66c9348459e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/assigner_visualization/visualization/assigner_visualizer.py @@ -0,0 +1,326 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import List, Union + +import mmcv +import numpy as np +import torch +from mmdet.structures.bbox import HorizontalBoxes +from mmdet.visualization import DetLocalVisualizer +from mmdet.visualization.palette import _get_adaptive_scales, get_palette +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.registry import VISUALIZERS + + +@VISUALIZERS.register_module() +class YOLOAssignerVisualizer(DetLocalVisualizer): + """MMYOLO Detection Assigner Visualizer. + + This class is provided to the `assigner_visualization.py` script. + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + """ + + def __init__(self, name: str = 'visualizer', *args, **kwargs): + super().__init__(name=name, *args, **kwargs) + # need priors_size from config + self.priors_size = None + + def draw_grid(self, + stride: int = 8, + line_styles: Union[str, List[str]] = ':', + colors: Union[str, tuple, List[str], + List[tuple]] = (180, 180, 180), + line_widths: Union[Union[int, float], + List[Union[int, float]]] = 1): + """Draw grids on image. + + Args: + stride (int): Downsample factor of feature map. + line_styles (Union[str, List[str]]): The linestyle + of lines. ``line_styles`` can have the same length with + texts or just single value. If ``line_styles`` is single + value, all the lines will have the same linestyle. + Reference to + https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle + for more details. Defaults to ':'. + colors (Union[str, tuple, List[str], List[tuple]]): The colors of + lines. ``colors`` can have the same length with lines or just + single value. If ``colors`` is single value, all the lines + will have the same colors. Reference to + https://matplotlib.org/stable/gallery/color/named_colors.html + for more details. Defaults to (180, 180, 180). 
+ line_widths (Union[Union[int, float], List[Union[int, float]]]): + The linewidth of lines. ``line_widths`` can have + the same length with lines or just single value. + If ``line_widths`` is single value, all the lines will + have the same linewidth. Defaults to 1. + """ + assert self._image is not None, 'Please set image using `set_image`' + # draw vertical lines + x_datas_vertical = ((np.arange(self.width // stride - 1) + 1) * + stride).reshape((-1, 1)).repeat( + 2, axis=1) + y_datas_vertical = np.array([[0, self.height - 1]]).repeat( + self.width // stride - 1, axis=0) + self.draw_lines( + x_datas_vertical, + y_datas_vertical, + colors=colors, + line_styles=line_styles, + line_widths=line_widths) + + # draw horizontal lines + x_datas_horizontal = np.array([[0, self.width - 1]]).repeat( + self.height // stride - 1, axis=0) + y_datas_horizontal = ((np.arange(self.height // stride - 1) + 1) * + stride).reshape((-1, 1)).repeat( + 2, axis=1) + self.draw_lines( + x_datas_horizontal, + y_datas_horizontal, + colors=colors, + line_styles=line_styles, + line_widths=line_widths) + + def draw_instances_assign(self, + instances: InstanceData, + retained_gt_inds: Tensor, + not_show_label: bool = False): + """Draw instances of GT. + + Args: + instances (:obj:`InstanceData`): gt_instance. It usually + includes ``bboxes`` and ``labels`` attributes. + retained_gt_inds (Tensor): The gt indexes assigned as the + positive sample in the current prior. + not_show_label (bool): Whether to show gt labels on images. + """ + assert self.dataset_meta is not None + classes = self.dataset_meta['classes'] + palette = self.dataset_meta['palette'] + if len(retained_gt_inds) == 0: + return self.get_image() + draw_gt_inds = torch.from_numpy( + np.array( + list(set(retained_gt_inds.cpu().numpy())), dtype=np.int64)) + bboxes = instances.bboxes[draw_gt_inds] + labels = instances.labels[draw_gt_inds] + + if not isinstance(bboxes, Tensor): + bboxes = bboxes.tensor + + edge_colors = [palette[i] for i in labels] + + max_label = int(max(labels) if len(labels) > 0 else 0) + text_palette = get_palette(self.text_color, max_label + 1) + text_colors = [text_palette[label] for label in labels] + + self.draw_bboxes( + bboxes, + edge_colors=edge_colors, + alpha=self.alpha, + line_widths=self.line_width) + + if not not_show_label: + positions = bboxes[:, :2] + self.line_width + areas = (bboxes[:, 3] - bboxes[:, 1]) * ( + bboxes[:, 2] - bboxes[:, 0]) + scales = _get_adaptive_scales(areas) + for i, (pos, label) in enumerate(zip(positions, labels)): + label_text = classes[ + label] if classes is not None else f'class {label}' + + self.draw_texts( + label_text, + pos, + colors=text_colors[i], + font_sizes=int(13 * scales[i]), + bboxes=[{ + 'facecolor': 'black', + 'alpha': 0.8, + 'pad': 0.7, + 'edgecolor': 'none' + }]) + + def draw_positive_assign(self, + grid_x_inds: Tensor, + grid_y_inds: Tensor, + class_inds: Tensor, + stride: int, + bboxes: Union[Tensor, HorizontalBoxes], + retained_gt_inds: Tensor, + offset: float = 0.5): + """ + + Args: + grid_x_inds (Tensor): The X-axis indexes of the positive sample + in current prior. + grid_y_inds (Tensor): The Y-axis indexes of the positive sample + in current prior. + class_inds (Tensor): The classes indexes of the positive sample + in current prior. + stride (int): Downsample factor of feature map. + bboxes (Union[Tensor, HorizontalBoxes]): Bounding boxes of GT. + retained_gt_inds (Tensor): The gt indexes assigned as the + positive sample in the current prior. 
+ offset (float): The offset of points, the value is normalized + with corresponding stride. Defaults to 0.5. + """ + if not isinstance(bboxes, Tensor): + # Convert HorizontalBoxes to Tensor + bboxes = bboxes.tensor + + # The PALETTE in the dataset_meta is required + assert self.dataset_meta is not None + palette = self.dataset_meta['palette'] + x = ((grid_x_inds + offset) * stride).long() + y = ((grid_y_inds + offset) * stride).long() + center = torch.stack((x, y), dim=-1) + + retained_bboxes = bboxes[retained_gt_inds] + bbox_wh = retained_bboxes[:, 2:] - retained_bboxes[:, :2] + bbox_area = bbox_wh[:, 0] * bbox_wh[:, 1] + radius = _get_adaptive_scales(bbox_area) * 4 + colors = [palette[i] for i in class_inds] + + self.draw_circles( + center, + radius, + colors, + line_widths=0, + face_colors=colors, + alpha=1.0) + + def draw_prior(self, + grid_x_inds: Tensor, + grid_y_inds: Tensor, + class_inds: Tensor, + stride: int, + feat_ind: int, + prior_ind: int, + offset: float = 0.5): + """Draw priors on image. + + Args: + grid_x_inds (Tensor): The X-axis indexes of the positive sample + in current prior. + grid_y_inds (Tensor): The Y-axis indexes of the positive sample + in current prior. + class_inds (Tensor): The classes indexes of the positive sample + in current prior. + stride (int): Downsample factor of feature map. + feat_ind (int): Index of featmap. + prior_ind (int): Index of prior in current featmap. + offset (float): The offset of points, the value is normalized + with corresponding stride. Defaults to 0.5. + """ + + palette = self.dataset_meta['palette'] + center_x = ((grid_x_inds + offset) * stride) + center_y = ((grid_y_inds + offset) * stride) + xyxy = torch.stack((center_x, center_y, center_x, center_y), dim=1) + device = xyxy.device + if self.priors_size is not None: + xyxy += self.priors_size[feat_ind][prior_ind].to(device) + else: + xyxy += torch.tensor( + [[-stride / 2, -stride / 2, stride / 2, stride / 2]], + device=device) + + colors = [palette[i] for i in class_inds] + self.draw_bboxes( + xyxy, + edge_colors=colors, + alpha=self.alpha, + line_styles='--', + line_widths=math.ceil(self.line_width * 0.3)) + + def draw_assign(self, + image: np.ndarray, + assign_results: List[List[dict]], + gt_instances: InstanceData, + show_prior: bool = False, + not_show_label: bool = False) -> np.ndarray: + """Draw assigning results. + + Args: + image (np.ndarray): The image to draw. + assign_results (list): The assigning results. + gt_instances (:obj:`InstanceData`): Data structure for + instance-level annotations or predictions. + show_prior (bool): Whether to show prior on image. + not_show_label (bool): Whether to show gt labels on images. + + Returns: + np.ndarray: the drawn image which channel is RGB. 
+ """ + img_show_list = [] + for feat_ind, assign_results_feat in enumerate(assign_results): + img_show_list_feat = [] + for prior_ind, assign_results_prior in enumerate( + assign_results_feat): + self.set_image(image) + h, w = image.shape[:2] + + # draw grid + stride = assign_results_prior['stride'] + self.draw_grid(stride) + + # draw prior on matched gt + grid_x_inds = assign_results_prior['grid_x_inds'] + grid_y_inds = assign_results_prior['grid_y_inds'] + class_inds = assign_results_prior['class_inds'] + prior_ind = assign_results_prior['prior_ind'] + offset = assign_results_prior.get('offset', 0.5) + + if show_prior: + self.draw_prior(grid_x_inds, grid_y_inds, class_inds, + stride, feat_ind, prior_ind, offset) + + # draw matched gt + retained_gt_inds = assign_results_prior['retained_gt_inds'] + self.draw_instances_assign(gt_instances, retained_gt_inds, + not_show_label) + + # draw positive + self.draw_positive_assign(grid_x_inds, grid_y_inds, class_inds, + stride, gt_instances.bboxes, + retained_gt_inds, offset) + + # draw title + if self.priors_size is not None: + base_prior = self.priors_size[feat_ind][prior_ind] + else: + base_prior = [stride, stride, stride * 2, stride * 2] + prior_size = (base_prior[2] - base_prior[0], + base_prior[3] - base_prior[1]) + pos = np.array((20, 20)) + text = f'feat_ind: {feat_ind} ' \ + f'prior_ind: {prior_ind} ' \ + f'prior_size: ({prior_size[0]}, {prior_size[1]})' + scales = _get_adaptive_scales(np.array([h * w / 16])) + font_sizes = int(13 * scales) + self.draw_texts( + text, + pos, + colors=self.text_color, + font_sizes=font_sizes, + bboxes=[{ + 'facecolor': 'black', + 'alpha': 0.8, + 'pad': 0.7, + 'edgecolor': 'none' + }]) + + img_show = self.get_image() + img_show = mmcv.impad(img_show, padding=(5, 5, 5, 5)) + img_show_list_feat.append(img_show) + img_show_list.append(np.concatenate(img_show_list_feat, axis=1)) + + # Merge all images into one image + # setting axis is to beautify the merged image + axis = 0 if len(assign_results[0]) > 1 else 1 + return np.concatenate(img_show_list, axis=axis) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README.md b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1816e7ed96ee34209c56af4a22eda5f1eb7e499b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README.md @@ -0,0 +1,11 @@ +# MMYOLO Model Easy-Deployment + +## Introduction + +This project is developed for easily converting your MMYOLO models to other inference backends without the need of MMDeploy, which reduces the cost of both time and effort on getting familiar with MMDeploy. + +Currently we support converting to `ONNX` and `TensorRT` formats, other inference backends such `ncnn` will be added to this project as well. 
+ +## Supported Backends + +- [Model Convert](docs/model_convert.md) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README_zh-CN.md b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..4c6bc0cf4ef91edeced04bdf15af08ae1f6f0dcd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/README_zh-CN.md @@ -0,0 +1,11 @@ +# MMYOLO 模型转换 + +## 介绍 + +本项目作为 MMYOLO 的部署 project 单独存在,意图剥离 MMDeploy 当前的体系,独自支持用户完成模型训练后的转换和部署功能,使用户的学习和工程成本下降。 + +当前支持对 ONNX 格式和 TensorRT 格式的转换,后续对其他推理平台也会支持起来。 + +## 转换教程 + +- [Model Convert](docs/model_convert.md) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dc167f8515c66a30d884ed9655a11d45e21481c0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .common import DeployC2f +from .focus import DeployFocus, GConvFocus, NcnnFocus + +__all__ = ['DeployFocus', 'NcnnFocus', 'GConvFocus', 'DeployC2f'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/common.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/common.py new file mode 100644 index 0000000000000000000000000000000000000000..617875bd979a5b9150e476544090777118087a0b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/common.py @@ -0,0 +1,16 @@ +import torch +import torch.nn as nn +from torch import Tensor + + +class DeployC2f(nn.Module): + + def __init__(self, *args, **kwargs): + super().__init__() + + def forward(self, x: Tensor) -> Tensor: + x_main = self.main_conv(x) + x_main = [x_main, x_main[:, self.mid_channels:, ...]] + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.pop(1) + return self.final_conv(torch.cat(x_main, 1)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/focus.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/focus.py new file mode 100644 index 0000000000000000000000000000000000000000..2a19afcca1d9c4e27109daeebd83907cd9b7b284 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/backbone/focus.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + + +class DeployFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + batch_size, channel, height, width = x.shape + x = x.reshape(batch_size, channel, -1, 2, width) + x = x.reshape(batch_size, channel, x.shape[2], 2, -1, 2) + half_h = x.shape[2] + half_w = x.shape[4] + x = x.permute(0, 5, 3, 1, 2, 4) + x = x.reshape(batch_size, channel * 4, half_h, half_w) + + return self.conv(x) + + +class NcnnFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + batch_size, c, h, w = x.shape + assert h % 2 == 0 and w % 2 == 0, f'focus for yolox needs even feature\ + height and width, got {(h, w)}.' 
+ + x = x.reshape(batch_size, c * h, 1, w) + _b, _c, _h, _w = x.shape + g = _c // 2 + # fuse to ncnn's shufflechannel + x = x.view(_b, g, 2, _h, _w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(_b, -1, _h, _w) + + x = x.reshape(_b, c * h * w, 1, 1) + + _b, _c, _h, _w = x.shape + g = _c // 2 + # fuse to ncnn's shufflechannel + x = x.view(_b, g, 2, _h, _w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(_b, -1, _h, _w) + + x = x.reshape(_b, c * 4, h // 2, w // 2) + + return self.conv(x) + + +class GConvFocus(nn.Module): + + def __init__(self, orin_Focus: nn.Module): + super().__init__() + device = next(orin_Focus.parameters()).device + self.weight1 = torch.tensor([[1., 0], [0, 0]]).expand(3, 1, 2, + 2).to(device) + self.weight2 = torch.tensor([[0, 0], [1., 0]]).expand(3, 1, 2, + 2).to(device) + self.weight3 = torch.tensor([[0, 1.], [0, 0]]).expand(3, 1, 2, + 2).to(device) + self.weight4 = torch.tensor([[0, 0], [0, 1.]]).expand(3, 1, 2, + 2).to(device) + self.__dict__.update(orin_Focus.__dict__) + + def forward(self, x: Tensor) -> Tensor: + conv1 = F.conv2d(x, self.weight1, stride=2, groups=3) + conv2 = F.conv2d(x, self.weight2, stride=2, groups=3) + conv3 = F.conv2d(x, self.weight3, stride=2, groups=3) + conv4 = F.conv2d(x, self.weight4, stride=2, groups=3) + return self.conv(torch.cat([conv1, conv2, conv3, conv4], dim=1)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b85a815536a5749a15f0ad6aab2b028eb6a3fe0a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bbox_coder import (rtmdet_bbox_decoder, yolov5_bbox_decoder, + yolox_bbox_decoder) + +__all__ = ['yolov5_bbox_decoder', 'rtmdet_bbox_decoder', 'yolox_bbox_decoder'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/bbox_coder.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..6483cf8b0328aff3d61f1fa0788337ab536d347d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/bbox_code/bbox_coder.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
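+
+# Box decoders used by the deploy model when exporting end-to-end graphs.
+# Each takes flattened priors, raw box predictions and per-level strides:
+#   - yolov5_bbox_decoder: sigmoid offsets relative to anchor boxes,
+#     returns (cx, cy, w, h).
+#   - rtmdet_bbox_decoder: stride-scaled distances from the prior point,
+#     returns (x1, y1, x2, y2).
+#   - yolox_bbox_decoder: stride-scaled center offsets and exp() sizes,
+#     returns (cx, cy, w, h).
+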
+from typing import Optional + +import torch +from torch import Tensor + + +def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Tensor) -> Tensor: + bbox_preds = bbox_preds.sigmoid() + + x_center = (priors[..., 0] + priors[..., 2]) * 0.5 + y_center = (priors[..., 1] + priors[..., 3]) * 0.5 + w = priors[..., 2] - priors[..., 0] + h = priors[..., 3] - priors[..., 1] + + x_center_pred = (bbox_preds[..., 0] - 0.5) * 2 * stride + x_center + y_center_pred = (bbox_preds[..., 1] - 0.5) * 2 * stride + y_center + w_pred = (bbox_preds[..., 2] * 2)**2 * w + h_pred = (bbox_preds[..., 3] * 2)**2 * h + + decoded_bboxes = torch.stack( + [x_center_pred, y_center_pred, w_pred, h_pred], dim=-1) + + return decoded_bboxes + + +def rtmdet_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Optional[Tensor]) -> Tensor: + stride = stride[None, :, None] + bbox_preds *= stride + tl_x = (priors[..., 0] - bbox_preds[..., 0]) + tl_y = (priors[..., 1] - bbox_preds[..., 1]) + br_x = (priors[..., 0] + bbox_preds[..., 2]) + br_y = (priors[..., 1] + bbox_preds[..., 3]) + decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) + return decoded_bboxes + + +def yolox_bbox_decoder(priors: Tensor, bbox_preds: Tensor, + stride: Optional[Tensor]) -> Tensor: + stride = stride[None, :, None] + xys = (bbox_preds[..., :2] * stride) + priors + whs = bbox_preds[..., 2:].exp() * stride + decoded_bboxes = torch.cat([xys, whs], -1) + return decoded_bboxes diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/CMakeLists.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f640bea13bacfc0f6cc2f33e598f65cf5ce0922e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/CMakeLists.txt @@ -0,0 +1,35 @@ +cmake_minimum_required(VERSION 2.8.12) + +set(CMAKE_CUDA_ARCHITECTURES 60 61 62 70 72 75 86) +set(CMAKE_CUDA_COMPILER /usr/local/cuda/bin/nvcc) + +project(nvdsparsebbox_mmyolo LANGUAGES CXX) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -O3 -g -Wall -Werror -shared -fPIC") +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_BUILD_TYPE Release) +option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) + +# CUDA +find_package(CUDA REQUIRED) + +# TensorRT +set(TensorRT_INCLUDE_DIRS "/usr/include/x86_64-linux-gnu" CACHE STRING "TensorRT headers path") +set(TensorRT_LIBRARIES "/usr/lib/x86_64-linux-gnu" CACHE STRING "TensorRT libs path") + +# DeepStream +set(DEEPSTREAM "/opt/nvidia/deepstream/deepstream" CACHE STRING "DeepStream root path") +set(DS_LIBRARIES ${DEEPSTREAM}/lib) +set(DS_INCLUDE_DIRS ${DEEPSTREAM}/sources/includes) + +include_directories( + ${CUDA_INCLUDE_DIRS} + ${TensorRT_INCLUDE_DIRS} + ${DS_INCLUDE_DIRS}) + +add_library( + ${PROJECT_NAME} + SHARED + custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp) + +target_link_libraries(${PROJECT_NAME} PRIVATE nvinfer nvinfer_plugin) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README.md b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README.md new file mode 100644 index 0000000000000000000000000000000000000000..111f3765e41d558b64097d8a25585bd9c14acf4f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README.md @@ -0,0 +1,48 @@ +# Inference MMYOLO Models with DeepStream + +This project demonstrates how to inference MMYOLO models with customized parsers in [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk). 
+ +## Pre-requisites + +### 1. Install Nvidia Driver and CUDA + +First, please follow the official documents and instructions to install dedicated Nvidia graphic driver and CUDA matched to your gpu and target Nvidia AIoT devices. + +### 2. Install DeepStream SDK + +Second, please follow the official instruction to download and install DeepStream SDK. Currently stable version of DeepStream is v6.2. + +### 3. Generate TensorRT Engine + +As DeepStream builds on top of several NVIDIA libraries, you need to first convert your trained MMYOLO models to TensorRT engine files. We strongly recommend you to try the supported TensorRT deployment solution in [EasyDeploy](../../easydeploy/). + +## Build and Run + +Please make sure that your converted TensorRT engine is already located in the `deepstream` folder as the config shows. Create your own model config files and change the `config-file` parameter in [deepstream_app_config.txt](deepstream_app_config.txt) to the model you want to run with. + +```bash +mkdir build && cd build +cmake .. +make -j$(nproc) && make install +``` + +Then you can run the inference with this command. + +```bash +deepstream-app -c deepstream_app_config.txt +``` + +## Code Structure + +```bash +├── deepstream +│ ├── configs # config file for MMYOLO models +│ │ └── config_infer_rtmdet.txt +│ ├── custom_mmyolo_bbox_parser # customized parser for MMYOLO models to DeepStream formats +│ │ └── nvdsparsebbox_mmyolo.cpp +| ├── CMakeLists.txt +│ ├── coco_labels.txt # labels for coco detection +│ ├── deepstream_app_config.txt # deepStream reference app configs for MMYOLO models +│ ├── README_zh-CN.md +│ └── README.md +``` diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README_zh-CN.md b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..13a85d5bc90159c3ff9f1a32e93d01e82ed2faa4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/README_zh-CN.md @@ -0,0 +1,48 @@ +# 使用 DeepStream SDK 推理 MMYOLO 模型 + +本项目演示了如何使用 [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk) 配合改写的 parser 来推理 MMYOLO 的模型。 + +## 预先准备 + +### 1. 安装 Nidia 驱动和 CUDA + +首先请根据当前的显卡驱动和目标使用设备的驱动完成显卡驱动和 CUDA 的安装。 + +### 2. 安装 DeepStream SDK + +目前 DeepStream SDK 稳定版本已经更新到 v6.2,官方推荐使用这个版本。 + +### 3. 将 MMYOLO 模型转换为 TensorRT Engine + +推荐使用 EasyDeploy 中的 TensorRT 方案完成目标模型的转换部署,具体可参考 [此文档](../../easydeploy/docs/model_convert.md) 。 + +## 编译使用 + +当前项目使用的是 MMYOLO 的 rtmdet 模型,若想使用其他的模型,请参照目录下的配置文件进行改写。然后将转换完的 TensorRT engine 放在当前目录下并执行如下命令: + +```bash +mkdir build && cd build +cmake .. 
+make -j$(nproc) && make install +``` + +完成编译后可使用如下命令进行推理: + +```bash +deepstream-app -c deepstream_app_config.txt +``` + +## 项目代码结构 + +```bash +├── deepstream +│ ├── configs # MMYOLO 模型对应的 DeepStream 配置 +│ │ └── config_infer_rtmdet.txt +│ ├── custom_mmyolo_bbox_parser # 适配 DeepStream formats 的 parser +│ │ └── nvdsparsebbox_mmyolo.cpp +| ├── CMakeLists.txt +│ ├── coco_labels.txt # coco labels +│ ├── deepstream_app_config.txt # DeepStream app 配置 +│ ├── README_zh-CN.md +│ └── README.md +``` diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/coco_labels.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/coco_labels.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca76c80b5b2cd0b25047f75736656cfebc9da7aa --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/coco_labels.txt @@ -0,0 +1,80 @@ +person +bicycle +car +motorbike +aeroplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +sofa +pottedplant +bed +diningtable +toilet +tvmonitor +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_rtmdet.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_rtmdet.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1e5efd2a3810730144e037ee96dfbd36124b0e6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_rtmdet.txt @@ -0,0 +1,22 @@ +[property] +gpu-id=0 +net-scale-factor=0.01735207357279195 +offsets=57.375;57.12;58.395 +model-color-format=1 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov5.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov5.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ad7d6429cacd0a6050821e5b2a41317478f5119 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov5.txt @@ -0,0 +1,21 @@ +[property] +gpu-id=0 +net-scale-factor=0.0039215697906911373 +model-color-format=0 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git 
a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov8.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov8.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ad7d6429cacd0a6050821e5b2a41317478f5119 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/configs/config_infer_yolov8.txt @@ -0,0 +1,21 @@ +[property] +gpu-id=0 +net-scale-factor=0.0039215697906911373 +model-color-format=0 +model-engine-file=../end2end.engine +labelfile-path=../coco_labels.txt +batch-size=1 +network-mode=0 +num-detected-classes=80 +interval=0 +gie-unique-id=1 +process-mode=1 +network-type=0 +cluster-mode=2 +maintain-aspect-ratio=1 +parse-bbox-func-name=NvDsInferParseCustomMMYOLO +custom-lib-path=../build/libnvdsparsebbox_mmyolo.so + +[class-attrs-all] +pre-cluster-threshold=0.45 +topk=100 diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb780856cbd2b289cdf9dc8518438f946a2ab548 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/custom_mmyolo_bbox_parser/nvdsparsebbox_mmyolo.cpp @@ -0,0 +1,118 @@ +#include "nvdsinfer_custom_impl.h" +#include +#include + +/** + * Function expected by DeepStream for decoding the MMYOLO output. + * + * C-linkage [extern "C"] was written to prevent name-mangling. This function must return true after + * adding all bounding boxes to the objectList vector. + * + * @param [outputLayersInfo] std::vector of NvDsInferLayerInfo objects with information about the output layer. + * @param [networkInfo] NvDsInferNetworkInfo object with information about the MMYOLO network. + * @param [detectionParams] NvDsInferParseDetectionParams with information about some config params. + * @param [objectList] std::vector of NvDsInferParseObjectInfo objects to which bounding box information must + * be stored. + * + * @return true + */ + +// This is just the function prototype. The definition is written at the end of the file. +extern "C" bool NvDsInferParseCustomMMYOLO( + std::vector const& outputLayersInfo, + NvDsInferNetworkInfo const& networkInfo, + NvDsInferParseDetectionParams const& detectionParams, + std::vector& objectList); + +static __inline__ float clamp(float& val, float min, float max) +{ + return val > min ? (val < max ? 
val : max) : min; +} + +static std::vector decodeMMYoloTensor( + const int* num_dets, + const float* bboxes, + const float* scores, + const int* labels, + const float& conf_thres, + const unsigned int& img_w, + const unsigned int& img_h +) +{ + std::vector bboxInfo; + size_t nums = num_dets[0]; + for (size_t i = 0; i < nums; i++) + { + float score = scores[i]; + if (score < conf_thres)continue; + float x0 = (bboxes[i * 4]); + float y0 = (bboxes[i * 4 + 1]); + float x1 = (bboxes[i * 4 + 2]); + float y1 = (bboxes[i * 4 + 3]); + x0 = clamp(x0, 0.f, img_w); + y0 = clamp(y0, 0.f, img_h); + x1 = clamp(x1, 0.f, img_w); + y1 = clamp(y1, 0.f, img_h); + NvDsInferParseObjectInfo obj; + obj.left = x0; + obj.top = y0; + obj.width = x1 - x0; + obj.height = y1 - y0; + obj.detectionConfidence = score; + obj.classId = labels[i]; + bboxInfo.push_back(obj); + } + + return bboxInfo; +} + +/* C-linkage to prevent name-mangling */ +extern "C" bool NvDsInferParseCustomMMYOLO( + std::vector const& outputLayersInfo, + NvDsInferNetworkInfo const& networkInfo, + NvDsInferParseDetectionParams const& detectionParams, + std::vector& objectList) +{ + +// Some assertions and error checking. + if (outputLayersInfo.empty() || outputLayersInfo.size() != 4) + { + std::cerr << "Could not find output layer in bbox parsing" << std::endl; + return false; + } + +// Score threshold of bboxes. + const float conf_thres = detectionParams.perClassThreshold[0]; + +// Obtaining the output layer. + const NvDsInferLayerInfo& num_dets = outputLayersInfo[0]; + const NvDsInferLayerInfo& bboxes = outputLayersInfo[1]; + const NvDsInferLayerInfo& scores = outputLayersInfo[2]; + const NvDsInferLayerInfo& labels = outputLayersInfo[3]; + +// num_dets(int) bboxes(float) scores(float) labels(int) + assert (num_dets.dims.numDims == 2); + assert (bboxes.dims.numDims == 3); + assert (scores.dims.numDims == 2); + assert (labels.dims.numDims == 2); + + +// Decoding the output tensor of MMYOLO to the NvDsInferParseObjectInfo format. 
+ std::vector objects = + decodeMMYoloTensor( + (const int*)(num_dets.buffer), + (const float*)(bboxes.buffer), + (const float*)(scores.buffer), + (const int*)(labels.buffer), + conf_thres, + networkInfo.width, + networkInfo.height + ); + + objectList.clear(); + objectList = objects; + return true; +} + +/* Check that the custom function has been defined correctly */ +CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomMMYOLO); diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/deepstream_app_config.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/deepstream_app_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..331776897a5e9109b9007ed1b7974f128287c4fc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/deepstream/deepstream_app_config.txt @@ -0,0 +1,62 @@ +[application] +enable-perf-measurement=1 +perf-measurement-interval-sec=5 + +[tiled-display] +enable=1 +rows=1 +columns=1 +width=1280 +height=720 +gpu-id=0 +nvbuf-memory-type=0 + +[source0] +enable=1 +type=3 +uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 +num-sources=1 +gpu-id=0 +cudadec-memtype=0 + +[sink0] +enable=1 +type=2 +sync=0 +gpu-id=0 +nvbuf-memory-type=0 + +[osd] +enable=1 +gpu-id=0 +border-width=5 +text-size=15 +text-color=1;1;1;1; +text-bg-color=0.3;0.3;0.3;1 +font=Serif +show-clock=0 +clock-x-offset=800 +clock-y-offset=820 +clock-text-size=12 +clock-color=1;0;0;0 +nvbuf-memory-type=0 + +[streammux] +gpu-id=0 +live-source=0 +batch-size=1 +batched-push-timeout=40000 +width=1920 +height=1080 +enable-padding=0 +nvbuf-memory-type=0 + +[primary-gie] +enable=1 +gpu-id=0 +gie-unique-id=1 +nvbuf-memory-type=0 +config-file=configs/config_infer_rtmdet.txt + +[tests] +file-loop=0 diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/docs/model_convert.md b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/docs/model_convert.md new file mode 100644 index 0000000000000000000000000000000000000000..9af62599dd1b56648680fc315ca88c35c7b31cb9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/docs/model_convert.md @@ -0,0 +1,156 @@ +# MMYOLO 模型 ONNX 转换 + +## 1. 
导出后端支持的 ONNX + +## 环境依赖 + +- [onnx](https://github.com/onnx/onnx) + + ```shell + pip install onnx + ``` + + [onnx-simplifier](https://github.com/daquexian/onnx-simplifier) (可选,用于简化模型) + + ```shell + pip install onnx-simplifier + ``` + +\*\*\* 请确保您在 `MMYOLO` 根目录下运行相关脚本,避免无法找到相关依赖包。\*\*\* + +## 使用方法 + +[模型导出脚本](./projects/easydeploy/tools/export_onnx.py)用于将 `MMYOLO` 模型转换为 `onnx` 。 + +### 参数介绍: + +- `config` : 构建模型使用的配置文件,如 [`yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py`](./configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py) 。 +- `checkpoint` : 训练得到的权重文件,如 `yolov5s.pth` 。 +- `--work-dir` : 转换后的模型保存路径。 +- `--img-size`: 转换模型时输入的尺寸,如 `640 640`。 +- `--batch-size`: 转换后的模型输入 `batch size` 。 +- `--device`: 转换模型使用的设备,默认为 `cuda:0`。 +- `--simplify`: 是否简化导出的 `onnx` 模型,需要安装 [onnx-simplifier](https://github.com/daquexian/onnx-simplifier),默认关闭。 +- `--opset`: 指定导出 `onnx` 的 `opset`,默认为 `11` 。 +- `--backend`: 指定导出 `onnx` 用于的后端名称,`ONNXRuntime`: `onnxruntime`, `TensorRT8`: `tensorrt8`, `TensorRT7`: `tensorrt7`,默认为`onnxruntime`即 `ONNXRuntime`。 +- `--pre-topk`: 指定导出 `onnx` 的后处理筛选候选框个数阈值,默认为 `1000`。 +- `--keep-topk`: 指定导出 `onnx` 的非极大值抑制输出的候选框个数阈值,默认为 `100`。 +- `--iou-threshold`: 非极大值抑制中过滤重复候选框的 `iou` 阈值,默认为 `0.65`。 +- `--score-threshold`: 非极大值抑制中过滤候选框得分的阈值,默认为 `0.25`。 +- `--model-only`: 指定仅导出模型 backbone + neck, 不包含后处理,默认关闭。 + +例子: + +```shell +python ./projects/easydeploy/tools/export.py \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + yolov5s.pth \ + --work-dir work_dir \ + --img-size 640 640 \ + --batch 1 \ + --device cpu \ + --simplify \ + --opset 11 \ + --backend 1 \ + --pre-topk 1000 \ + --keep-topk 100 \ + --iou-threshold 0.65 \ + --score-threshold 0.25 +``` + +然后利用后端支持的工具如 `TensorRT` 读取 `onnx` 再次转换为后端支持的模型格式如 `.engine/.plan` 等。 + +`MMYOLO` 目前支持 `TensorRT8`, `TensorRT7`, `ONNXRuntime` 后端的端到端模型转换,目前仅支持静态 shape 模型的导出和转换,动态 batch 或动态长宽的模型端到端转换会在未来继续支持。 + +端到端转换得到的 `onnx` 模型输入输出如图: + +
+ +
+ +输入名: `images`, 尺寸 640x640 + +输出名: `num_dets`, 尺寸 1x1,表示检测目标数量。 + +输出名: `boxes`, 尺寸 1x100x4,表示检测框的坐标,格式为 `x1y1x2y1`。 + +输出名: `scores`, 尺寸 1x100,表示检测框的分数。 + +输出名: `labels`, 尺寸 1x100,表示检测框的类别 id。 + +可以利用 `num_dets` 中的个数对 `boxes`, `scores`, `labels` 进行截断,从 100 个检测结果中抽取前 `num_dets` 个目标作为最终检测结果。 + +## 2. 仅导出模型 Backbone + Neck + +当您需要部署在非 `TensorRT`, `ONNXRuntime` 等支持端到端部署的平台时,您可以考虑使用`--model-only` 参数并且不要传递 `--backend` 参数,您将会导出仅包含 `Backbone` + `neck` 的模型,模型的部分输出如图: + +
+ +
+ +这种导出方式获取的 `ONNX` 模型具有如下优点: + +- 算子简单,一般而言只包含 `Conv`,激活函数等简单算子,几乎不存在无法正确导出的情况,对于嵌入式部署更加友好。 +- 方便不同算法之间对比速度性能,由于不同的算法后处理不同,仅对比 `backbone` + `Neck` 的速度更加公平。 + +也有如下缺点: + +- 后处理逻辑需要单独完成,会有额外的 `decode` + `nms` 的操作需要实现。 +- 与 `TensorRT` 相比,由于 `TensorRT` 可以利用多核优势并行进行后处理,使用 `--model-only` 方式导出的模型性能会差很多。 + +### 使用方法 + +```shell +python ./projects/easydeploy/tools/export.py \ + configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ + yolov5s.pth \ + --work-dir work_dir \ + --img-size 640 640 \ + --batch 1 \ + --device cpu \ + --simplify \ + --opset 11 \ + --model-only +``` + +## 使用 `model-only` 导出的 ONNX 进行推理 + +[模型推理脚本](./projects/easydeploy/examples/main_onnxruntime.py)用于推理导出的 `ONNX` 模型,需要安装基础依赖环境: + +[`onnxruntime`](https://github.com/microsoft/onnxruntime) 和 [`opencv-python`](https://github.com/opencv/opencv-python) + +```shell +pip install onnxruntime +pip install opencv-python==4.7.0.72 # 建议使用最新的 opencv +``` + +### 参数介绍: + +- `img` : 待检测的图片路径或图片文件夹路径。 +- `onnx` : 导出的 `model-only` ONNX 模型。 +- `--type` : 模型名称,目前支持 `yolov5`, `yolox`, `yolov6`, `ppyoloe`, `ppyoloep`, `yolov7`, `rtmdet`, `yolov8`。 +- `--img-size`: 转换模型时输入的尺寸,如 `640 640`。 +- `--out-dir`: 保存检测结果的路径 。 +- `--show`: 是否可视化检测结果。 +- `--score-thr`: 模型检测后处理的置信度分数 。 +- `--iou-thr`: 模型检测后处理的 IOU 分数 。 + +## 使用方法 + +```shell +cd ./projects/easydeploy/examples +python main_onnxruntime.py \ + "image_path_to_detect" \ + yolov5_s_model-only.onnx \ + --out-dir work_dir \ + --img-size 640 640 \ + --show \ + --score-thr 0.3 \ + --iou-thr 0.7 +``` + +*注意!!!* + +当您使用自定义数据集训练得到的模型时,请修改 [`config.py`](./projects/easydeploy/examples/config.py) 中 `CLASS_NAMES` 和 `CLASS_COLORS`,如果是 `yolov5` 或者 `yolov7` 基于 `anchor` 的模型请同时修改 `YOLOv5_ANCHORS` 和 `YOLOv7_ANCHORS`。 + +[`numpy_coder.py`](./projects/easydeploy/examples/numpy_coder.py) 是目前所有算法仅使用 `numpy` 实现的 `decoder`,如果您对性能有较高的要求,可以参照相关代码改写为 `c/c++`。 diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/config.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4a85ff34273c22a356c9d6a3eaeb048b637b5f40 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/config.py @@ -0,0 +1,64 @@ +from enum import Enum + + +class TASK_TYPE(Enum): + DET = 'det' + SEG = 'seg' + POSE = 'pose' + + +class ModelType(Enum): + YOLOV5 = 'yolov5' + YOLOX = 'yolox' + PPYOLOE = 'ppyoloe' + PPYOLOEP = 'ppyoloep' + YOLOV6 = 'yolov6' + YOLOV7 = 'yolov7' + RTMDET = 'rtmdet' + YOLOV8 = 'yolov8' + + +CLASS_NAMES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', + 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') + +CLASS_COLORS = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), + (106, 0, 228), (0, 60, 100), (0, 
80, 100), (0, 0, 70), + (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), + (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), + (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), + (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), + (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), + (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), + (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), + (134, 134, 103), (145, 148, 174), (255, 208, 186), + (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), + (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), + (166, 196, 102), (208, 195, 210), (255, 109, 65), + (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0), + (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161), + (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120), + (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133), + (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62), + (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45), + (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1), + (246, 0, 122), (191, 162, 208)] + +YOLOv5_ANCHORS = [[(10, 13), (16, 30), (33, 23)], + [(30, 61), (62, 45), (59, 119)], + [(116, 90), (156, 198), (373, 326)]] + +YOLOv7_ANCHORS = [[(12, 16), (19, 36), (40, 28)], + [(36, 75), (76, 55), (72, 146)], + [(142, 110), (192, 243), (459, 401)]] diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/cv2_nms.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/cv2_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..79e376356b75339c796aeeb280cd8cdb52db8518 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/cv2_nms.py @@ -0,0 +1,36 @@ +from typing import List, Tuple, Union + +import cv2 +from numpy import ndarray + +MAJOR, MINOR = map(int, cv2.__version__.split('.')[:2]) +assert MAJOR == 4 + + +def non_max_suppression(boxes: Union[List[ndarray], Tuple[ndarray]], + scores: Union[List[float], Tuple[float]], + labels: Union[List[int], Tuple[int]], + conf_thres: float = 0.25, + iou_thres: float = 0.65) -> Tuple[List, List, List]: + if MINOR >= 7: + indices = cv2.dnn.NMSBoxesBatched(boxes, scores, labels, conf_thres, + iou_thres) + elif MINOR == 6: + indices = cv2.dnn.NMSBoxes(boxes, scores, conf_thres, iou_thres) + else: + indices = cv2.dnn.NMSBoxes(boxes, scores, conf_thres, + iou_thres).flatten() + + nmsd_boxes = [] + nmsd_scores = [] + nmsd_labels = [] + for idx in indices: + box = boxes[idx] + # x0y0wh -> x0y0x1y1 + box[2:] = box[:2] + box[2:] + score = scores[idx] + label = labels[idx] + nmsd_boxes.append(box) + nmsd_scores.append(score) + nmsd_labels.append(label) + return nmsd_boxes, nmsd_scores, nmsd_labels diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/main_onnxruntime.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/main_onnxruntime.py new file mode 100644 index 0000000000000000000000000000000000000000..bc0ad1b0f10ed6cbea8c8b3c0c5010ec7a760cb5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/main_onnxruntime.py @@ -0,0 +1,110 @@ +import math +import sys +from argparse import ArgumentParser +from pathlib import Path + +import cv2 +import onnxruntime +from config import (CLASS_COLORS, CLASS_NAMES, ModelType, YOLOv5_ANCHORS, + YOLOv7_ANCHORS) +from cv2_nms import non_max_suppression +from numpy_coder import Decoder +from preprocess import 
Preprocess +from tqdm import tqdm + +# Add __FILE__ to sys.path +sys.path.append(str(Path(__file__).resolve().parents[0])) + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', + '.tiff', '.webp') + + +def path_to_list(path: str): + path = Path(path) + if path.is_file() and path.suffix in IMG_EXTENSIONS: + res_list = [str(path.absolute())] + elif path.is_dir(): + res_list = [ + str(p.absolute()) for p in path.iterdir() + if p.suffix in IMG_EXTENSIONS + ] + else: + raise RuntimeError + return res_list + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('onnx', type=str, help='Onnx file') + parser.add_argument('--type', type=str, help='Model type') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument( + '--out-dir', default='./output', type=str, help='Path to output file') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + parser.add_argument( + '--score-thr', type=float, default=0.3, help='Bbox score threshold') + parser.add_argument( + '--iou-thr', type=float, default=0.7, help='Bbox iou threshold') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + out_dir = Path(args.out_dir) + model_type = ModelType(args.type.lower()) + + if not args.show: + out_dir.mkdir(parents=True, exist_ok=True) + + files = path_to_list(args.img) + session = onnxruntime.InferenceSession( + args.onnx, providers=['CPUExecutionProvider']) + preprocessor = Preprocess(model_type) + decoder = Decoder(model_type, model_only=True) + if model_type == ModelType.YOLOV5: + anchors = YOLOv5_ANCHORS + elif model_type == ModelType.YOLOV7: + anchors = YOLOv7_ANCHORS + else: + anchors = None + + for file in tqdm(files): + image = cv2.imread(file) + image_h, image_w = image.shape[:2] + img, (ratio_w, ratio_h) = preprocessor(image, args.img_size) + features = session.run(None, {'images': img}) + decoder_outputs = decoder( + features, + args.score_thr, + num_labels=len(CLASS_NAMES), + anchors=anchors) + nmsd_boxes, nmsd_scores, nmsd_labels = non_max_suppression( + *decoder_outputs, args.score_thr, args.iou_thr) + for box, score, label in zip(nmsd_boxes, nmsd_scores, nmsd_labels): + x0, y0, x1, y1 = box + x0 = math.floor(min(max(x0 / ratio_w, 1), image_w - 1)) + y0 = math.floor(min(max(y0 / ratio_h, 1), image_h - 1)) + x1 = math.ceil(min(max(x1 / ratio_w, 1), image_w - 1)) + y1 = math.ceil(min(max(y1 / ratio_h, 1), image_h - 1)) + cv2.rectangle(image, (x0, y0), (x1, y1), CLASS_COLORS[label], 2) + cv2.putText(image, f'{CLASS_NAMES[label]}: {score:.2f}', + (x0, y0 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, + (0, 255, 255), 2) + if args.show: + cv2.imshow('result', image) + cv2.waitKey(0) + else: + cv2.imwrite(f'{out_dir / Path(file).name}', image) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/numpy_coder.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/numpy_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..ccd3687f89ed47dbbb1d90e603eba21a760bded9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/numpy_coder.py @@ -0,0 +1,310 @@ +from typing import List, Tuple, Union + +import numpy as np +from config import ModelType +from numpy import ndarray + + +def softmax(x: ndarray, axis: int = -1) -> 
ndarray: + e_x = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = e_x / e_x.sum(axis=axis, keepdims=True) + return y + + +def sigmoid(x: ndarray) -> ndarray: + return 1. / (1. + np.exp(-x)) + + +class Decoder: + + def __init__(self, model_type: ModelType, model_only: bool = False): + self.model_type = model_type + self.model_only = model_only + self.boxes_pro = [] + self.scores_pro = [] + self.labels_pro = [] + self.is_logging = False + + def __call__(self, + feats: Union[List, Tuple], + conf_thres: float, + num_labels: int = 80, + **kwargs) -> Tuple: + if not self.is_logging: + print('Only support decode in batch==1') + self.is_logging = True + self.boxes_pro.clear() + self.scores_pro.clear() + self.labels_pro.clear() + + if self.model_only: + # transpose channel to last dim for easy decoding + feats = [ + np.ascontiguousarray(feat[0].transpose(1, 2, 0)) + for feat in feats + ] + else: + # ax620a horizonX3 transpose channel to last dim by default + feats = [np.ascontiguousarray(feat) for feat in feats] + if self.model_type == ModelType.YOLOV5: + self.__yolov5_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOX: + self.__yolox_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type in (ModelType.PPYOLOE, ModelType.PPYOLOEP): + self.__ppyoloe_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV6: + self.__yolov6_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV7: + self.__yolov7_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.RTMDET: + self.__rtmdet_decode(feats, conf_thres, num_labels, **kwargs) + elif self.model_type == ModelType.YOLOV8: + self.__yolov8_decode(feats, conf_thres, num_labels, **kwargs) + else: + raise NotImplementedError + return self.boxes_pro, self.scores_pro, self.labels_pro + + def __yolov5_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + anchors: Union[List, Tuple] = kwargs.get( + 'anchors', + [[(10, 13), (16, 30), + (33, 23)], [(30, 61), (62, 45), + (59, 119)], [(116, 90), (156, 198), (373, 326)]]) + for i, feat in enumerate(feats): + stride = 8 << i + feat_h, feat_w, _ = feat.shape + anchor = anchors[i] + feat = sigmoid(feat) + feat = feat.reshape((feat_h, feat_w, len(anchor), -1)) + box_feat, conf_feat, score_feat = np.split(feat, [4, 5], -1) + + hIdx, wIdx, aIdx, _ = np.where(conf_feat > conf_thres) + + num_proposal = hIdx.size + if not num_proposal: + continue + + score_feat = score_feat[hIdx, wIdx, aIdx] * conf_feat[hIdx, wIdx, + aIdx] + boxes = box_feat[hIdx, wIdx, aIdx] + labels = score_feat.argmax(-1) + scores = score_feat.max(-1) + + indices = np.where(scores > conf_thres)[0] + if len(indices) == 0: + continue + + for idx in indices: + a_w, a_h = anchor[aIdx[idx]] + x, y, w, h = boxes[idx] + x = (x * 2.0 - 0.5 + wIdx[idx]) * stride + y = (y * 2.0 - 0.5 + hIdx[idx]) * stride + w = (w * 2.0)**2 * a_w + h = (h * 2.0)**2 * a_h + + x0 = x - w / 2 + y0 = y - h / 2 + + self.scores_pro.append(float(scores[idx])) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(labels[idx])) + + def __yolox_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat, conf_feat = np.split( + feat, [num_labels, num_labels + 4], -1) + conf_feat = sigmoid(conf_feat) + + hIdx, wIdx, _ = np.where(conf_feat > 
conf_thres) + + num_proposal = hIdx.size + if not num_proposal: + continue + + score_feat = sigmoid(score_feat[hIdx, wIdx]) * conf_feat[hIdx, + wIdx] + boxes = box_feat[hIdx, wIdx] + labels = score_feat.argmax(-1) + scores = score_feat.max(-1) + indices = np.where(scores > conf_thres)[0] + + if len(indices) == 0: + continue + + for idx in indices: + score = scores[idx] + label = labels[idx] + + x, y, w, h = boxes[idx] + + x = (x + wIdx[idx]) * stride + y = (y + hIdx[idx]) * stride + w = np.exp(w) * stride + h = np.exp(h) * stride + + x0 = x - w / 2 + y0 = y - h / 2 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __ppyoloe_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + reg_max: int = kwargs.get('reg_max', 17) + dfl = np.arange(0, reg_max, dtype=np.float32) + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx].reshape(num_proposal, 4, reg_max) + boxes = softmax(boxes, -1) @ dfl + labels = _argmax[hIdx, wIdx] + + for k in range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] + 0.5 - x0) * stride + y0 = (hIdx[k] + 0.5 - y0) * stride + x1 = (wIdx[k] + 0.5 + x1) * stride + y1 = (hIdx[k] + 0.5 + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov6_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx] + labels = _argmax[hIdx, wIdx] + + for k in range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] + 0.5 - x0) * stride + y0 = (hIdx[k] + 0.5 - y0) * stride + x1 = (wIdx[k] + 0.5 + x1) * stride + y1 = (hIdx[k] + 0.5 + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov7_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + anchors: Union[List, Tuple] = kwargs.get( + 'anchors', + [[(12, 16), (19, 36), + (40, 28)], [(36, 75), (76, 55), + (72, 146)], [(142, 110), (192, 243), (459, 401)]]) + self.__yolov5_decode(feats, conf_thres, num_labels, anchors=anchors) + + def __rtmdet_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + for i, feat in enumerate(feats): + stride = 8 << i + score_feat, box_feat = np.split(feat, [ + num_labels, + ], -1) + score_feat = sigmoid(score_feat) + _argmax = score_feat.argmax(-1) + _max = score_feat.max(-1) + indices = np.where(_max > conf_thres) + 
hIdx, wIdx = indices + num_proposal = hIdx.size + if not num_proposal: + continue + + scores = _max[hIdx, wIdx] + boxes = box_feat[hIdx, wIdx] + labels = _argmax[hIdx, wIdx] + + for k in range(num_proposal): + score = scores[k] + label = labels[k] + + x0, y0, x1, y1 = boxes[k] + + x0 = (wIdx[k] - x0) * stride + y0 = (hIdx[k] - y0) * stride + x1 = (wIdx[k] + x1) * stride + y1 = (hIdx[k] + y1) * stride + + w = x1 - x0 + h = y1 - y0 + + self.scores_pro.append(float(score)) + self.boxes_pro.append( + np.array([x0, y0, w, h], dtype=np.float32)) + self.labels_pro.append(int(label)) + + def __yolov8_decode(self, + feats: List[ndarray], + conf_thres: float, + num_labels: int = 80, + **kwargs): + reg_max: int = kwargs.get('reg_max', 16) + self.__ppyoloe_decode(feats, conf_thres, num_labels, reg_max=reg_max) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/preprocess.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6fb563a16a7f40ef556b5a23f635ab4627fc4f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/preprocess.py @@ -0,0 +1,57 @@ +from typing import List, Tuple, Union + +import cv2 +import numpy as np +from config import ModelType +from numpy import ndarray + + +class Preprocess: + + def __init__(self, model_type: ModelType): + if model_type in (ModelType.YOLOV5, ModelType.YOLOV6, ModelType.YOLOV7, + ModelType.YOLOV8): + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([255, 255, 255], dtype=np.float32) + is_rgb = True + elif model_type == ModelType.YOLOX: + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([1, 1, 1], dtype=np.float32) + is_rgb = False + elif model_type == ModelType.PPYOLOE: + mean = np.array([123.675, 116.28, 103.53], dtype=np.float32) + std = np.array([58.395, 57.12, 57.375], dtype=np.float32) + is_rgb = True + + elif model_type == ModelType.PPYOLOEP: + mean = np.array([0, 0, 0], dtype=np.float32) + std = np.array([255, 255, 255], dtype=np.float32) + is_rgb = True + elif model_type == ModelType.RTMDET: + mean = np.array([103.53, 116.28, 123.675], dtype=np.float32) + std = np.array([57.375, 57.12, 58.3955], dtype=np.float32) + is_rgb = False + else: + raise NotImplementedError + + self.mean = mean.reshape((3, 1, 1)) + self.std = std.reshape((3, 1, 1)) + self.is_rgb = is_rgb + + def __call__(self, + image: ndarray, + new_size: Union[List[int], Tuple[int]] = (640, 640), + **kwargs) -> Tuple[ndarray, Tuple[float, float]]: + # new_size: (height, width) + height, width = image.shape[:2] + ratio_h, ratio_w = new_size[0] / height, new_size[1] / width + image = cv2.resize( + image, (0, 0), + fx=ratio_w, + fy=ratio_h, + interpolation=cv2.INTER_LINEAR) + image = np.ascontiguousarray(image.transpose(2, 0, 1)) + image = image.astype(np.float32) + image -= self.mean + image /= self.std + return image[np.newaxis], (ratio_w, ratio_h) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/requirements.txt b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b761189b52fc57e4231b37df0ff42bb44404c95 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/examples/requirements.txt @@ -0,0 +1,2 @@ +onnxruntime +opencv-python==4.7.0.72 diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/__init__.py 
b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38af8bc322b0a8e0c870fac243a0af9c1dba7315 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backend import MMYOLOBackend +from .backendwrapper import ORTWrapper, TRTWrapper +from .model import DeployModel + +__all__ = ['DeployModel', 'TRTWrapper', 'ORTWrapper', 'MMYOLOBackend'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backend.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..64d6e3f020bcfd3c3cf7db5f5611a8f815df4cb1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backend.py @@ -0,0 +1,23 @@ +from enum import Enum + +import torch +import torch.nn.functional as F + + +class MMYOLOBackend(Enum): + AX620A = 'ax620a' + COREML = 'coreml' + HORIZONX3 = 'horizonx3' + NCNN = 'ncnn' + ONNXRUNTIME = 'onnxruntime' + OPENVINO = 'openvino' + PPLNN = 'pplnn' + RKNN = 'rknn' + TENSORRT8 = 'tensorrt8' + TENSORRT7 = 'tensorrt7' + TORCHSCRIPT = 'torchscript' + TVM = 'tvm' + + +def HSigmoid__forward(self, x: torch.Tensor) -> torch.Tensor: + return F.hardsigmoid(x, inplace=True) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backendwrapper.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backendwrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..2997d84ea98b3f30973cf2335ab0eb4af4edaef5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/backendwrapper.py @@ -0,0 +1,202 @@ +import warnings +from collections import namedtuple +from functools import partial +from pathlib import Path +from typing import List, Optional, Union + +import numpy as np +import onnxruntime + +try: + import tensorrt as trt +except Exception: + trt = None +import torch + +warnings.filterwarnings(action='ignore', category=DeprecationWarning) + + +class TRTWrapper(torch.nn.Module): + dtype_mapping = {} + + def __init__(self, weight: Union[str, Path], + device: Optional[torch.device]): + super().__init__() + weight = Path(weight) if isinstance(weight, str) else weight + assert weight.exists() and weight.suffix in ('.engine', '.plan') + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + self.weight = weight + self.device = device + self.stream = torch.cuda.Stream(device=device) + self.__update_mapping() + self.__init_engine() + self.__init_bindings() + + def __update_mapping(self): + self.dtype_mapping.update({ + trt.bool: torch.bool, + trt.int8: torch.int8, + trt.int32: torch.int32, + trt.float16: torch.float16, + trt.float32: torch.float32 + }) + + def __init_engine(self): + logger = trt.Logger(trt.Logger.ERROR) + self.log = partial(logger.log, trt.Logger.ERROR) + trt.init_libnvinfer_plugins(logger, namespace='') + self.logger = logger + with trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(self.weight.read_bytes()) + + context = model.create_execution_context() + + names = [model.get_binding_name(i) for i in range(model.num_bindings)] + + num_inputs, num_outputs = 0, 0 + + for i in range(model.num_bindings): + if model.binding_is_input(i): + num_inputs += 1 + else: + num_outputs += 1 + + self.is_dynamic = 
-1 in model.get_binding_shape(0) + + self.model = model + self.context = context + self.input_names = names[:num_inputs] + self.output_names = names[num_inputs:] + self.num_inputs = num_inputs + self.num_outputs = num_outputs + self.num_bindings = num_inputs + num_outputs + self.bindings: List[int] = [0] * self.num_bindings + + def __init_bindings(self): + Binding = namedtuple('Binding', ('name', 'dtype', 'shape')) + inputs_info = [] + outputs_info = [] + + for i, name in enumerate(self.input_names): + assert self.model.get_binding_name(i) == name + dtype = self.dtype_mapping[self.model.get_binding_dtype(i)] + shape = tuple(self.model.get_binding_shape(i)) + inputs_info.append(Binding(name, dtype, shape)) + + for i, name in enumerate(self.output_names): + i += self.num_inputs + assert self.model.get_binding_name(i) == name + dtype = self.dtype_mapping[self.model.get_binding_dtype(i)] + shape = tuple(self.model.get_binding_shape(i)) + outputs_info.append(Binding(name, dtype, shape)) + self.inputs_info = inputs_info + self.outputs_info = outputs_info + if not self.is_dynamic: + self.output_tensor = [ + torch.empty(o.shape, dtype=o.dtype, device=self.device) + for o in outputs_info + ] + + def forward(self, *inputs): + + assert len(inputs) == self.num_inputs + + contiguous_inputs: List[torch.Tensor] = [ + i.contiguous() for i in inputs + ] + + for i in range(self.num_inputs): + self.bindings[i] = contiguous_inputs[i].data_ptr() + if self.is_dynamic: + self.context.set_binding_shape( + i, tuple(contiguous_inputs[i].shape)) + + # create output tensors + outputs: List[torch.Tensor] = [] + + for i in range(self.num_outputs): + j = i + self.num_inputs + if self.is_dynamic: + shape = tuple(self.context.get_binding_shape(j)) + output = torch.empty( + size=shape, + dtype=self.output_dtypes[i], + device=self.device) + + else: + output = self.output_tensor[i] + outputs.append(output) + self.bindings[j] = output.data_ptr() + + self.context.execute_async_v2(self.bindings, self.stream.cuda_stream) + self.stream.synchronize() + + return tuple(outputs) + + +class ORTWrapper(torch.nn.Module): + + def __init__(self, weight: Union[str, Path], + device: Optional[torch.device]): + super().__init__() + weight = Path(weight) if isinstance(weight, str) else weight + assert weight.exists() and weight.suffix == '.onnx' + + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + self.weight = weight + self.device = device + self.__init_session() + self.__init_bindings() + + def __init_session(self): + providers = ['CPUExecutionProvider'] + if 'cuda' in self.device.type: + providers.insert(0, 'CUDAExecutionProvider') + + session = onnxruntime.InferenceSession( + str(self.weight), providers=providers) + self.session = session + + def __init_bindings(self): + Binding = namedtuple('Binding', ('name', 'dtype', 'shape')) + inputs_info = [] + outputs_info = [] + self.is_dynamic = False + for i, tensor in enumerate(self.session.get_inputs()): + if any(not isinstance(i, int) for i in tensor.shape): + self.is_dynamic = True + inputs_info.append( + Binding(tensor.name, tensor.type, tuple(tensor.shape))) + + for i, tensor in enumerate(self.session.get_outputs()): + outputs_info.append( + Binding(tensor.name, tensor.type, tuple(tensor.shape))) + self.inputs_info = inputs_info + self.outputs_info = outputs_info + self.num_inputs = len(inputs_info) + + def forward(self, *inputs): + + assert len(inputs) == self.num_inputs + + contiguous_inputs: 
List[np.ndarray] = [ + i.contiguous().cpu().numpy() for i in inputs + ] + + if not self.is_dynamic: + # make sure input shape is right for static input shape + for i in range(self.num_inputs): + assert contiguous_inputs[i].shape == self.inputs_info[i].shape + + outputs = self.session.run([o.name for o in self.outputs_info], { + j.name: contiguous_inputs[i] + for i, j in enumerate(self.inputs_info) + }) + + return tuple(torch.from_numpy(o).to(self.device) for o in outputs) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/model.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c67ed2872097e82d7f569a2f486b1a6463cde986 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/model/model.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from functools import partial +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from mmdet.models.backbones.csp_darknet import Focus +from mmdet.models.layers import ChannelAttention +from mmengine.config import ConfigDict +from torch import Tensor + +from mmyolo.models import RepVGGBlock +from mmyolo.models.dense_heads import (PPYOLOEHead, RTMDetHead, YOLOv5Head, + YOLOv7Head, YOLOv8Head, YOLOXHead) +from mmyolo.models.layers import ImplicitA, ImplicitM +from ..backbone import DeployFocus, GConvFocus, NcnnFocus +from ..bbox_code import (rtmdet_bbox_decoder, yolov5_bbox_decoder, + yolox_bbox_decoder) +from ..nms import batched_nms, efficient_nms, onnx_nms +from .backend import MMYOLOBackend + + +class DeployModel(nn.Module): + transpose = False + + def __init__(self, + baseModel: nn.Module, + backend: MMYOLOBackend, + postprocess_cfg: Optional[ConfigDict] = None): + super().__init__() + self.baseModel = baseModel + self.baseHead = baseModel.bbox_head + self.backend = backend + if postprocess_cfg is None: + self.with_postprocess = False + else: + self.with_postprocess = True + self.__init_sub_attributes() + self.detector_type = type(self.baseHead) + self.pre_top_k = postprocess_cfg.get('pre_top_k', 1000) + self.keep_top_k = postprocess_cfg.get('keep_top_k', 100) + self.iou_threshold = postprocess_cfg.get('iou_threshold', 0.65) + self.score_threshold = postprocess_cfg.get('score_threshold', 0.25) + self.__switch_deploy() + + def __init_sub_attributes(self): + self.bbox_decoder = self.baseHead.bbox_coder.decode + self.prior_generate = self.baseHead.prior_generator.grid_priors + self.num_base_priors = self.baseHead.num_base_priors + self.featmap_strides = self.baseHead.featmap_strides + self.num_classes = self.baseHead.num_classes + + def __switch_deploy(self): + headType = type(self.baseHead) + if not self.with_postprocess: + if headType in (YOLOv5Head, YOLOv7Head): + self.baseHead.head_module.forward_single = self.forward_single + elif headType in (PPYOLOEHead, YOLOv8Head): + self.baseHead.head_module.reg_max = 0 + + if self.backend in (MMYOLOBackend.HORIZONX3, MMYOLOBackend.NCNN, + MMYOLOBackend.TORCHSCRIPT): + self.transpose = True + for layer in self.baseModel.modules(): + if isinstance(layer, RepVGGBlock): + layer.switch_to_deploy() + elif isinstance(layer, ChannelAttention): + layer.global_avgpool.forward = self.forward_gvp + elif isinstance(layer, Focus): + # onnxruntime openvino tensorrt8 tensorrt7 + if self.backend in (MMYOLOBackend.ONNXRUNTIME, + MMYOLOBackend.OPENVINO, + MMYOLOBackend.TENSORRT8, + MMYOLOBackend.TENSORRT7): + 
self.baseModel.backbone.stem = DeployFocus(layer) + # ncnn + elif self.backend == MMYOLOBackend.NCNN: + self.baseModel.backbone.stem = NcnnFocus(layer) + # switch focus to group conv + else: + self.baseModel.backbone.stem = GConvFocus(layer) + + def pred_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + **kwargs): + assert len(cls_scores) == len(bbox_preds) + dtype = cls_scores[0].dtype + device = cls_scores[0].device + + nms_func = self.select_nms() + if self.detector_type in (YOLOv5Head, YOLOv7Head): + bbox_decoder = yolov5_bbox_decoder + elif self.detector_type is RTMDetHead: + bbox_decoder = rtmdet_bbox_decoder + elif self.detector_type is YOLOXHead: + bbox_decoder = yolox_bbox_decoder + else: + bbox_decoder = self.bbox_decoder + + num_imgs = cls_scores[0].shape[0] + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + mlvl_priors = self.prior_generate( + featmap_sizes, dtype=dtype, device=device) + + flatten_priors = torch.cat(mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size[0] * featmap_size[1] * self.num_base_priors, ), + stride) for featmap_size, stride in zip( + featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + + if objectnesses is not None: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1)) + + scores = cls_scores + + bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds, + flatten_stride) + + return nms_func(bboxes, scores, self.keep_top_k, self.iou_threshold, + self.score_threshold, self.pre_top_k, self.keep_top_k) + + def select_nms(self): + if self.backend in (MMYOLOBackend.ONNXRUNTIME, MMYOLOBackend.OPENVINO): + nms_func = onnx_nms + elif self.backend == MMYOLOBackend.TENSORRT8: + nms_func = efficient_nms + elif self.backend == MMYOLOBackend.TENSORRT7: + nms_func = batched_nms + else: + raise NotImplementedError + if type(self.baseHead) in (YOLOv5Head, YOLOv7Head, YOLOXHead): + nms_func = partial(nms_func, box_coding=1) + + return nms_func + + def forward(self, inputs: Tensor): + neck_outputs = self.baseModel(inputs) + if self.with_postprocess: + return self.pred_by_feat(*neck_outputs) + else: + outputs = [] + if self.transpose: + for feats in zip(*neck_outputs): + if self.backend in (MMYOLOBackend.NCNN, + MMYOLOBackend.TORCHSCRIPT): + outputs.append( + torch.cat( + [feat.permute(0, 2, 3, 1) for feat in feats], + -1)) + else: + outputs.append(torch.cat(feats, 1).permute(0, 2, 3, 1)) + else: + for feats in zip(*neck_outputs): + outputs.append(torch.cat(feats, 1)) + return tuple(outputs) + + @staticmethod + def forward_single(x: Tensor, convs: nn.Module) -> Tuple[Tensor]: + if isinstance(convs, nn.Sequential) and any( + type(m) in (ImplicitA, ImplicitM) for m in convs): + a, c, m = convs + aw = a.implicit.clone() + mw = m.implicit.clone() + c = deepcopy(c) + nw, cw, _, _ = c.weight.shape + na, ca, _, _ = 
aw.shape + nm, cm, _, _ = mw.shape + c.bias = nn.Parameter(c.bias + ( + c.weight.reshape(nw, cw) @ aw.reshape(ca, na)).squeeze(1)) + c.bias = nn.Parameter(c.bias * mw.reshape(cm)) + c.weight = nn.Parameter(c.weight * mw.transpose(0, 1)) + convs = c + feat = convs(x) + return (feat, ) + + @staticmethod + def forward_gvp(x: Tensor) -> Tensor: + return torch.mean(x, [2, 3], keepdim=True) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59c5cdbd2b3b195125a14f473b825f616755fd6e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ort_nms import onnx_nms +from .trt_nms import batched_nms, efficient_nms + +__all__ = ['efficient_nms', 'batched_nms', 'onnx_nms'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/ort_nms.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/ort_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..aad93cf05ac2ee9d61a85b4bf9e7b63c352859ec --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/ort_nms.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import Tensor + +_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], + [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]], + dtype=torch.float32) + + +def select_nms_index(scores: Tensor, + boxes: Tensor, + nms_index: Tensor, + batch_size: int, + keep_top_k: int = -1): + batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1] + box_inds = nms_index[:, 2] + + scores = scores[batch_inds, cls_inds, box_inds].unsqueeze(1) + boxes = boxes[batch_inds, box_inds, ...] + dets = torch.cat([boxes, scores], dim=1) + + batched_dets = dets.unsqueeze(0).repeat(batch_size, 1, 1) + batch_template = torch.arange( + 0, batch_size, dtype=batch_inds.dtype, device=batch_inds.device) + batched_dets = batched_dets.where( + (batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1), + batched_dets.new_zeros(1)) + + batched_labels = cls_inds.unsqueeze(0).repeat(batch_size, 1) + batched_labels = batched_labels.where( + (batch_inds == batch_template.unsqueeze(1)), + batched_labels.new_ones(1) * -1) + + N = batched_dets.shape[0] + + batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))), + 1) + batched_labels = torch.cat((batched_labels, -batched_labels.new_ones( + (N, 1))), 1) + + _, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True) + topk_batch_inds = torch.arange( + batch_size, dtype=topk_inds.dtype, + device=topk_inds.device).view(-1, 1) + batched_dets = batched_dets[topk_batch_inds, topk_inds, ...] + batched_labels = batched_labels[topk_batch_inds, topk_inds, ...] 
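+    # The all-zero detection row appended above guarantees at least one entry
+    # per image, so the descending sort by score stays well defined even when
+    # NMS keeps nothing for a batch element; real detections are counted below
+    # as entries with score > 0.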
+ batched_dets, batched_scores = batched_dets.split([4, 1], 2) + batched_scores = batched_scores.squeeze(-1) + + num_dets = (batched_scores > 0).sum(1, keepdim=True) + return num_dets, batched_dets, batched_scores, batched_labels + + +class ONNXNMSop(torch.autograd.Function): + + @staticmethod + def forward( + ctx, + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: Tensor = torch.tensor([100]), + iou_threshold: Tensor = torch.tensor([0.5]), + score_threshold: Tensor = torch.tensor([0.05]) + ) -> Tensor: + device = boxes.device + batch = scores.shape[0] + num_det = 20 + batches = torch.randint(0, batch, (num_det, )).sort()[0].to(device) + idxs = torch.arange(100, 100 + num_det).to(device) + zeros = torch.zeros((num_det, ), dtype=torch.int64).to(device) + selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], + 0).T.contiguous() + selected_indices = selected_indices.to(torch.int64) + + return selected_indices + + @staticmethod + def symbolic( + g, + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: Tensor = torch.tensor([100]), + iou_threshold: Tensor = torch.tensor([0.5]), + score_threshold: Tensor = torch.tensor([0.05]), + ): + return g.op( + 'NonMaxSuppression', + boxes, + scores, + max_output_boxes_per_class, + iou_threshold, + score_threshold, + outputs=1) + + +def onnx_nms( + boxes: torch.Tensor, + scores: torch.Tensor, + max_output_boxes_per_class: int = 100, + iou_threshold: float = 0.5, + score_threshold: float = 0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + max_output_boxes_per_class = torch.tensor([max_output_boxes_per_class]) + iou_threshold = torch.tensor([iou_threshold]) + score_threshold = torch.tensor([score_threshold]) + + batch_size, _, _ = scores.shape + if box_coding == 1: + boxes = boxes @ (_XYWH2XYXY.to(boxes.device)) + scores = scores.transpose(1, 2).contiguous() + selected_indices = ONNXNMSop.apply(boxes, scores, + max_output_boxes_per_class, + iou_threshold, score_threshold) + + num_dets, batched_dets, batched_scores, batched_labels = select_nms_index( + scores, boxes, selected_indices, batch_size, keep_top_k=keep_top_k) + + return num_dets, batched_dets, batched_scores, batched_labels.to( + torch.int32) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/trt_nms.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/trt_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..e0db1e2164d4366ff9ce4f74d39ded917c39ba79 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/nms/trt_nms.py @@ -0,0 +1,226 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
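+# The autograd Functions in this module wrap TensorRT's EfficientNMS_TRT and
+# BatchedNMSDynamic_TRT plugins: `forward` only fakes output shapes so ONNX
+# tracing can proceed, while `symbolic` emits the actual TRT plugin node
+# during export.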
+import torch +from torch import Tensor + +_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], + [-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]], + dtype=torch.float32) + + +class TRTEfficientNMSop(torch.autograd.Function): + + @staticmethod + def forward( + ctx, + boxes: Tensor, + scores: Tensor, + background_class: int = -1, + box_coding: int = 0, + iou_threshold: float = 0.45, + max_output_boxes: int = 100, + plugin_version: str = '1', + score_activation: int = 0, + score_threshold: float = 0.25, + ): + batch_size, _, num_classes = scores.shape + num_det = torch.randint( + 0, max_output_boxes, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, max_output_boxes, 4) + det_scores = torch.randn(batch_size, max_output_boxes) + det_classes = torch.randint( + 0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic(g, + boxes: Tensor, + scores: Tensor, + background_class: int = -1, + box_coding: int = 0, + iou_threshold: float = 0.45, + max_output_boxes: int = 100, + plugin_version: str = '1', + score_activation: int = 0, + score_threshold: float = 0.25): + out = g.op( + 'TRT::EfficientNMS_TRT', + boxes, + scores, + background_class_i=background_class, + box_coding_i=box_coding, + iou_threshold_f=iou_threshold, + max_output_boxes_i=max_output_boxes, + plugin_version_s=plugin_version, + score_activation_i=score_activation, + score_threshold_f=score_threshold, + outputs=4) + num_det, det_boxes, det_scores, det_classes = out + return num_det, det_boxes, det_scores, det_classes + + +class TRTbatchedNMSop(torch.autograd.Function): + """TensorRT NMS operation.""" + + @staticmethod + def forward( + ctx, + boxes: Tensor, + scores: Tensor, + plugin_version: str = '1', + shareLocation: int = 1, + backgroundLabelId: int = -1, + numClasses: int = 80, + topK: int = 1000, + keepTopK: int = 100, + scoreThreshold: float = 0.25, + iouThreshold: float = 0.45, + isNormalized: int = 0, + clipBoxes: int = 0, + scoreBits: int = 16, + caffeSemantics: int = 1, + ): + batch_size, _, numClasses = scores.shape + num_det = torch.randint( + 0, keepTopK, (batch_size, 1), dtype=torch.int32) + det_boxes = torch.randn(batch_size, keepTopK, 4) + det_scores = torch.randn(batch_size, keepTopK) + det_classes = torch.randint(0, numClasses, + (batch_size, keepTopK)).float() + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic( + g, + boxes: Tensor, + scores: Tensor, + plugin_version: str = '1', + shareLocation: int = 1, + backgroundLabelId: int = -1, + numClasses: int = 80, + topK: int = 1000, + keepTopK: int = 100, + scoreThreshold: float = 0.25, + iouThreshold: float = 0.45, + isNormalized: int = 0, + clipBoxes: int = 0, + scoreBits: int = 16, + caffeSemantics: int = 1, + ): + out = g.op( + 'TRT::BatchedNMSDynamic_TRT', + boxes, + scores, + shareLocation_i=shareLocation, + plugin_version_s=plugin_version, + backgroundLabelId_i=backgroundLabelId, + numClasses_i=numClasses, + topK_i=topK, + keepTopK_i=keepTopK, + scoreThreshold_f=scoreThreshold, + iouThreshold_f=iouThreshold, + isNormalized_i=isNormalized, + clipBoxes_i=clipBoxes, + scoreBits_i=scoreBits, + caffeSemantics_i=caffeSemantics, + outputs=4) + num_det, det_boxes, det_scores, det_classes = out + return num_det, det_boxes, det_scores, det_classes + + +def _efficient_nms( + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: int = 1000, + iou_threshold: float = 0.5, + score_threshold: float = 
0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + """Wrapper for `efficient_nms` with TensorRT. + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5. + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (int): Number of top K boxes to keep before nms. + Defaults to -1. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + box_coding (int): Bounding boxes format for nms. + Defaults to 0 means [x1, y1 ,x2, y2]. + Set to 1 means [x, y, w, h]. + Returns: + tuple[Tensor, Tensor, Tensor, Tensor]: + (num_det, det_boxes, det_scores, det_classes), + `num_det` of shape [N, 1] + `det_boxes` of shape [N, num_det, 4] + `det_scores` of shape [N, num_det] + `det_classes` of shape [N, num_det] + """ + num_det, det_boxes, det_scores, det_classes = TRTEfficientNMSop.apply( + boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0, + score_threshold) + return num_det, det_boxes, det_scores, det_classes + + +def _batched_nms( + boxes: Tensor, + scores: Tensor, + max_output_boxes_per_class: int = 1000, + iou_threshold: float = 0.5, + score_threshold: float = 0.05, + pre_top_k: int = -1, + keep_top_k: int = 100, + box_coding: int = 0, +): + """Wrapper for `efficient_nms` with TensorRT. + Args: + boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + max_output_boxes_per_class (int): Maximum number of output + boxes per class of nms. Defaults to 1000. + iou_threshold (float): IOU threshold of nms. Defaults to 0.5. + score_threshold (float): score threshold of nms. + Defaults to 0.05. + pre_top_k (int): Number of top K boxes to keep before nms. + Defaults to -1. + keep_top_k (int): Number of top K boxes to keep after nms. + Defaults to -1. + box_coding (int): Bounding boxes format for nms. + Defaults to 0 means [x1, y1 ,x2, y2]. + Set to 1 means [x, y, w, h]. 
+ Returns: + tuple[Tensor, Tensor, Tensor, Tensor]: + (num_det, det_boxes, det_scores, det_classes), + `num_det` of shape [N, 1] + `det_boxes` of shape [N, num_det, 4] + `det_scores` of shape [N, num_det] + `det_classes` of shape [N, num_det] + """ + if box_coding == 1: + boxes = boxes @ (_XYWH2XYXY.to(boxes.device)) + boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2) + _, _, numClasses = scores.shape + + num_det, det_boxes, det_scores, det_classes = TRTbatchedNMSop.apply( + boxes, scores, '1', 1, -1, int(numClasses), min(pre_top_k, 4096), + keep_top_k, score_threshold, iou_threshold, 0, 0, 16, 1) + + det_classes = det_classes.int() + return num_det, det_boxes, det_scores, det_classes + + +def efficient_nms(*args, **kwargs): + """Wrapper function for `_efficient_nms`.""" + return _efficient_nms(*args, **kwargs) + + +def batched_nms(*args, **kwargs): + """Wrapper function for `_batched_nms`.""" + return _batched_nms(*args, **kwargs) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/build_engine.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/build_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..b400c9db826878a7bb0fb13f4b1dea9b793583e7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/build_engine.py @@ -0,0 +1,136 @@ +import argparse +from pathlib import Path +from typing import List, Optional, Tuple, Union + +try: + import tensorrt as trt +except Exception: + trt = None +import warnings + +import numpy as np +import torch + +warnings.filterwarnings(action='ignore', category=DeprecationWarning) + + +class EngineBuilder: + + def __init__( + self, + checkpoint: Union[str, Path], + opt_shape: Union[Tuple, List] = (1, 3, 640, 640), + device: Optional[Union[str, int, torch.device]] = None) -> None: + checkpoint = Path(checkpoint) if isinstance(checkpoint, + str) else checkpoint + assert checkpoint.exists() and checkpoint.suffix == '.onnx' + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(f'cuda:{device}') + + self.checkpoint = checkpoint + self.opt_shape = np.array(opt_shape, dtype=np.float32) + self.device = device + + def __build_engine(self, + scale: Optional[List[List]] = None, + fp16: bool = True, + with_profiling: bool = True) -> None: + logger = trt.Logger(trt.Logger.WARNING) + trt.init_libnvinfer_plugins(logger, namespace='') + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = torch.cuda.get_device_properties( + self.device).total_memory + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(self.checkpoint)): + raise RuntimeError( + f'failed to load ONNX file: {str(self.checkpoint)}') + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + profile = None + dshape = -1 in network.get_input(0).shape + if dshape: + profile = builder.create_optimization_profile() + if scale is None: + scale = np.array( + [[1, 1, 0.5, 0.5], [1, 1, 1, 1], [4, 1, 1.5, 1.5]], + dtype=np.float32) + scale = (self.opt_shape * scale).astype(np.int32) + elif isinstance(scale, List): + scale = np.array(scale, dtype=np.int32) + assert scale.shape[0] == 3, 'Input a wrong scale list' + else: + raise NotImplementedError + + for inp in inputs: + logger.log( + 
trt.Logger.WARNING, + f'input "{inp.name}" with shape{inp.shape} {inp.dtype}') + if dshape: + profile.set_shape(inp.name, *scale) + for out in outputs: + logger.log( + trt.Logger.WARNING, + f'output "{out.name}" with shape{out.shape} {out.dtype}') + if fp16 and builder.platform_has_fast_fp16: + config.set_flag(trt.BuilderFlag.FP16) + self.weight = self.checkpoint.with_suffix('.engine') + if dshape: + config.add_optimization_profile(profile) + if with_profiling: + config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED + with builder.build_engine(network, config) as engine: + self.weight.write_bytes(engine.serialize()) + logger.log( + trt.Logger.WARNING, f'Build tensorrt engine finish.\n' + f'Save in {str(self.weight.absolute())}') + + def build(self, + scale: Optional[List[List]] = None, + fp16: bool = True, + with_profiling=True): + self.__build_engine(scale, fp16, with_profiling) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument( + '--device', type=str, default='cuda:0', help='TensorRT builder device') + parser.add_argument( + '--scales', + type=str, + default='[[1,3,640,640],[1,3,640,640],[1,3,640,640]]', + help='Input scales for build dynamic input shape engine') + parser.add_argument( + '--fp16', action='store_true', help='Build model with fp16 mode') + args = parser.parse_args() + args.img_size *= 2 if len(args.img_size) == 1 else 1 + return args + + +def main(args): + img_size = (1, 3, *args.img_size) + try: + scales = eval(args.scales) + except Exception: + print('Input scales is not a python variable') + print('Set scales default None') + scales = None + builder = EngineBuilder(args.checkpoint, img_size, args.device) + builder.build(scales, fp16=args.fp16) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/export_onnx.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/export_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..b937cc8a72b5c09d61580ddb1297213693adaf1c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/export_onnx.py @@ -0,0 +1,157 @@ +import argparse +import os +import sys +import warnings +from io import BytesIO +from pathlib import Path + +import onnx +import torch +from mmdet.apis import init_detector +from mmengine.config import ConfigDict +from mmengine.logging import print_log +from mmengine.utils.path import mkdir_or_exist + +# Add MMYOLO ROOT to sys.path +sys.path.append(str(Path(__file__).resolve().parents[3])) +from projects.easydeploy.model import DeployModel, MMYOLOBackend # noqa E402 + +warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) +warnings.filterwarnings(action='ignore', category=torch.jit.ScriptWarning) +warnings.filterwarnings(action='ignore', category=UserWarning) +warnings.filterwarnings(action='ignore', category=FutureWarning) +warnings.filterwarnings(action='ignore', category=ResourceWarning) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--model-only', action='store_true', help='Export model only') + parser.add_argument( + '--work-dir', default='./work_dir', help='Path to save export 
model') + parser.add_argument( + '--img-size', + nargs='+', + type=int, + default=[640, 640], + help='Image size of height and width') + parser.add_argument('--batch-size', type=int, default=1, help='Batch size') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--simplify', + action='store_true', + help='Simplify onnx model by onnx-sim') + parser.add_argument( + '--opset', type=int, default=11, help='ONNX opset version') + parser.add_argument( + '--backend', + type=str, + default='onnxruntime', + help='Backend for export onnx') + parser.add_argument( + '--pre-topk', + type=int, + default=1000, + help='Postprocess pre topk bboxes feed into NMS') + parser.add_argument( + '--keep-topk', + type=int, + default=100, + help='Postprocess keep topk bboxes out of NMS') + parser.add_argument( + '--iou-threshold', + type=float, + default=0.65, + help='IoU threshold for NMS') + parser.add_argument( + '--score-threshold', + type=float, + default=0.25, + help='Score threshold for NMS') + args = parser.parse_args() + args.img_size *= 2 if len(args.img_size) == 1 else 1 + return args + + +def build_model_from_cfg(config_path, checkpoint_path, device): + model = init_detector(config_path, checkpoint_path, device=device) + model.eval() + return model + + +def main(): + args = parse_args() + mkdir_or_exist(args.work_dir) + backend = MMYOLOBackend(args.backend.lower()) + if backend in (MMYOLOBackend.ONNXRUNTIME, MMYOLOBackend.OPENVINO, + MMYOLOBackend.TENSORRT8, MMYOLOBackend.TENSORRT7): + if not args.model_only: + print_log('Export ONNX with bbox decoder and NMS ...') + else: + args.model_only = True + print_log(f'Can not export postprocess for {args.backend.lower()}.\n' + f'Set "args.model_only=True" default.') + if args.model_only: + postprocess_cfg = None + output_names = None + else: + postprocess_cfg = ConfigDict( + pre_top_k=args.pre_topk, + keep_top_k=args.keep_topk, + iou_threshold=args.iou_threshold, + score_threshold=args.score_threshold) + output_names = ['num_dets', 'boxes', 'scores', 'labels'] + baseModel = build_model_from_cfg(args.config, args.checkpoint, args.device) + + deploy_model = DeployModel( + baseModel=baseModel, backend=backend, postprocess_cfg=postprocess_cfg) + deploy_model.eval() + + fake_input = torch.randn(args.batch_size, 3, + *args.img_size).to(args.device) + # dry run + deploy_model(fake_input) + + save_onnx_path = os.path.join( + args.work_dir, + os.path.basename(args.checkpoint).replace('pth', 'onnx')) + # export onnx + with BytesIO() as f: + torch.onnx.export( + deploy_model, + fake_input, + f, + input_names=['images'], + output_names=output_names, + opset_version=args.opset) + f.seek(0) + onnx_model = onnx.load(f) + onnx.checker.check_model(onnx_model) + + # Fix tensorrt onnx output shape, just for view + if not args.model_only and backend in (MMYOLOBackend.TENSORRT8, + MMYOLOBackend.TENSORRT7): + shapes = [ + args.batch_size, 1, args.batch_size, args.keep_topk, 4, + args.batch_size, args.keep_topk, args.batch_size, + args.keep_topk + ] + for i in onnx_model.graph.output: + for j in i.type.tensor_type.shape.dim: + j.dim_param = str(shapes.pop(0)) + if args.simplify: + try: + import onnxsim + onnx_model, check = onnxsim.simplify(onnx_model) + assert check, 'assert check failed' + except Exception as e: + print_log(f'Simplify failure: {e}') + onnx.save(onnx_model, save_onnx_path) + print_log(f'ONNX export success, save into {save_onnx_path}') + + +if __name__ == '__main__': + main() diff --git 
a/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/image-demo.py b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/image-demo.py new file mode 100644 index 0000000000000000000000000000000000000000..c85f31a02beeb708e23662fe08dd0a105f112aaf --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/easydeploy/tools/image-demo.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from projects.easydeploy.model import ORTWrapper, TRTWrapper # isort:skip +import os +import random +from argparse import ArgumentParser + +import cv2 +import mmcv +import numpy as np +import torch +from mmcv.transforms import Compose +from mmdet.utils import get_test_pipeline_cfg +from mmengine.config import Config, ConfigDict +from mmengine.utils import ProgressBar, path + +from mmyolo.utils import register_all_modules +from mmyolo.utils.misc import get_file_list + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument( + 'img', help='Image path, include image file, dir and URL.') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--out-dir', default='./output', help='Path to output file') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--show', action='store_true', help='Show the detection results') + args = parser.parse_args() + return args + + +def preprocess(config): + data_preprocess = config.get('model', {}).get('data_preprocessor', {}) + mean = data_preprocess.get('mean', [0., 0., 0.]) + std = data_preprocess.get('std', [1., 1., 1.]) + mean = torch.tensor(mean, dtype=torch.float32).reshape(1, 3, 1, 1) + std = torch.tensor(std, dtype=torch.float32).reshape(1, 3, 1, 1) + + class PreProcess(torch.nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + x = x[None].float() + x -= mean.to(x.device) + x /= std.to(x.device) + return x + + return PreProcess().eval() + + +def main(): + args = parse_args() + + # register all modules in mmdet into the registries + register_all_modules() + + colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(1000)] + + # build the model from a config file and a checkpoint file + if args.checkpoint.endswith('.onnx'): + model = ORTWrapper(args.checkpoint, args.device) + elif args.checkpoint.endswith('.engine') or args.checkpoint.endswith( + '.plan'): + model = TRTWrapper(args.checkpoint, args.device) + else: + raise NotImplementedError + + model.to(args.device) + + cfg = Config.fromfile(args.config) + class_names = cfg.get('class_name') + + test_pipeline = get_test_pipeline_cfg(cfg) + test_pipeline[0] = ConfigDict({'type': 'mmdet.LoadImageFromNDArray'}) + test_pipeline = Compose(test_pipeline) + + pre_pipeline = preprocess(cfg) + + if not args.show: + path.mkdir_or_exist(args.out_dir) + + # get file list + files, source_type = get_file_list(args.img) + + # start detector inference + progress_bar = ProgressBar(len(files)) + for i, file in enumerate(files): + bgr = mmcv.imread(file) + rgb = mmcv.imconvert(bgr, 'bgr', 'rgb') + data, samples = test_pipeline(dict(img=rgb, img_id=i)).values() + pad_param = samples.get('pad_param', + np.array([0, 0, 0, 0], dtype=np.float32)) + h, w = samples.get('ori_shape', rgb.shape[:2]) + pad_param = torch.asarray( + [pad_param[2], pad_param[0], pad_param[2], pad_param[0]], + device=args.device) + scale_factor = samples.get('scale_factor', [1., 1]) + scale_factor = torch.asarray(scale_factor * 2, 
device=args.device) + data = pre_pipeline(data).to(args.device) + + result = model(data) + if source_type['is_dir']: + filename = os.path.relpath(file, args.img).replace('/', '_') + else: + filename = os.path.basename(file) + out_file = None if args.show else os.path.join(args.out_dir, filename) + + # Get candidate predict info by num_dets + num_dets, bboxes, scores, labels = result + scores = scores[0, :num_dets] + bboxes = bboxes[0, :num_dets] + labels = labels[0, :num_dets] + bboxes -= pad_param + bboxes /= scale_factor + + bboxes[:, 0::2].clamp_(0, w) + bboxes[:, 1::2].clamp_(0, h) + bboxes = bboxes.round().int() + + for (bbox, score, label) in zip(bboxes, scores, labels): + bbox = bbox.tolist() + color = colors[label] + + if class_names is not None: + label_name = class_names[label] + name = f'cls:{label_name}_score:{score:0.4f}' + else: + name = f'cls:{label}_score:{score:0.4f}' + + cv2.rectangle(bgr, bbox[:2], bbox[2:], color, 2) + cv2.putText( + bgr, + name, (bbox[0], bbox[1] - 2), + cv2.FONT_HERSHEY_SIMPLEX, + 2.0, [225, 255, 255], + thickness=3) + + if args.show: + mmcv.imshow(bgr, 'result', 0) + else: + mmcv.imwrite(bgr, out_file) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/projects/example_project/README.md b/models/YOLO-World/third_party/mmyolo/projects/example_project/README.md new file mode 100644 index 0000000000000000000000000000000000000000..24c84d9808aa4a78294aa23058083e0de80de62e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/example_project/README.md @@ -0,0 +1,141 @@ +# Dummy YOLOv5CSPDarknet Wrapper + +This is an example README for community `projects/`. We have provided detailed explanations for each field in the form of html comments, which are visible when you read the source of this README file. If you wish to submit your project to our main repository, then all the fields in this README are mandatory for others to understand what you have achieved in this implementation. For more details, read our [contribution guide](https://mmyolo.readthedocs.io/en/latest/community/contributing.html) or approach us in [Discussions](https://github.com/open-mmlab/mmyolo/discussions). + +## Description + + + +This project implements a dummy YOLOv5CSPDarknet wrapper, which literally does nothing new but prints "hello world" during initialization. 
+ +## Usage + + + +### Training commands + +In MMYOLO's root directory, run the following command to train the model: + +```bash +python tools/train.py projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py +``` + +### Testing commands + +In MMYOLO's root directory, run the following command to test the model: + +```bash +python tools/test.py projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py ${CHECKPOINT_PATH} +``` + +## Results + + + +| Method | Backbone | Pretrained Model | Training set | Test set | #epoch | box AP | Download | +| :---------------------------------------------------------------------------: | :-------------------: | :--------------: | :------------: | :----------: | :----: | :----: | :----------------------: | +| [YOLOv5 dummy](configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py) | DummyYOLOv5CSPDarknet | - | COCO2017 Train | COCO2017 Val | 300 | 37.7 | [model](<>) \| [log](<>) | + +## Citation + + + +```latex +@software{glenn_jocher_2022_7002879, + author = {Glenn Jocher and + Ayush Chaurasia and + Alex Stoken and + Jirka Borovec and + NanoCode012 and + Yonghye Kwon and + TaoXie and + Kalen Michael and + Jiacong Fang and + imyhxy and + Lorna and + Colin Wong and + 曾逸夫(Zeng Yifu) and + Abhiram V and + Diego Montes and + Zhiqiang Wang and + Cristi Fati and + Jebastin Nadar and + Laughing and + UnglvKitDe and + tkianai and + yxNONG and + Piotr Skalski and + Adam Hogan and + Max Strobel and + Mrinal Jain and + Lorenzo Mammana and + xylieong}, + title = {{ultralytics/yolov5: v6.2 - YOLOv5 Classification + Models, Apple M1, Reproducibility, ClearML and + Deci.ai integrations}}, + month = aug, + year = 2022, + publisher = {Zenodo}, + version = {v6.2}, + doi = {10.5281/zenodo.7002879}, + url = {https://doi.org/10.5281/zenodo.7002879} +} +``` + +## Checklist + + + +- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [ ] Finish the code + + + + - [ ] Basic docstrings & proper citation + + + + - [ ] Test-time correctness + + + + - [ ] A full README + + + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + + + - [ ] Unit tests + + + + - [ ] Code polishing + + + + - [ ] Metafile.yml + + + +- [ ] Move your modules into the core package following the codebase's file hierarchy structure. + + + +- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure. 
diff --git a/models/YOLO-World/third_party/mmyolo/projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py b/models/YOLO-World/third_party/mmyolo/projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..55b43bb3e97a20b4b9f98d5bc297bf8ef375da8e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py @@ -0,0 +1,5 @@ +_base_ = '../../../configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py' + +custom_imports = dict(imports=['projects.example_project.dummy']) + +_base_.model.backbone.type = 'DummyYOLOv5CSPDarknet' diff --git a/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/__init__.py b/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1028c8735be8ece5942d0ca64b69a8da16ed82 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dummy_yolov5cspdarknet import DummyYOLOv5CSPDarknet + +__all__ = ['DummyYOLOv5CSPDarknet'] diff --git a/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/dummy_yolov5cspdarknet.py b/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/dummy_yolov5cspdarknet.py new file mode 100644 index 0000000000000000000000000000000000000000..c500abb4278581af99d6a190fd7694ffdd08117c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/example_project/dummy/dummy_yolov5cspdarknet.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmyolo.models import YOLOv5CSPDarknet +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class DummyYOLOv5CSPDarknet(YOLOv5CSPDarknet): + """Implements a dummy YOLOv5CSPDarknet wrapper for demonstration purpose. + Args: + **kwargs: All the arguments are passed to the parent class. + """ + + def __init__(self, **kwargs) -> None: + print('Hello world!') + super().__init__(**kwargs) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/README.md b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e98fa730241aee0d54fea62fb752ab4eb901f0a0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/README.md @@ -0,0 +1,3 @@ +Tips: 这个是自定义数据集的 config 文件,请结合 [标注+训练+测试+部署全流程](https://github.com/open-mmlab/mmyolo/blob/main/docs/zh_cn/recommended_topics/labeling_to_deployment_tutorials.md) 来使用。 + +Tips: This is the config file of the custom dataset. Please use it in combination with [Annotation-to-deployment workflow for custom dataset](https://github.com/open-mmlab/mmyolo/blob/main/docs/en/recommended_topics/labeling_to_deployment_tutorials.md). 
diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..1d6a9d3b0f5ecf9ff7a46202d50b733810d93124 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py @@ -0,0 +1,76 @@ +_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +max_epochs = 100 +data_root = './data/cat/' +# data_root = '/root/workspace/mmyolo/data/cat/' # Docker + +work_dir = './work_dirs/yolov5_s-v61_syncbn_fast_1xb32-100e_cat' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa + +train_batch_size_per_gpu = 32 +train_num_workers = 4 + +save_epoch_intervals = 2 + +# base_lr_default * (your_bs / default_bs) +base_lr = _base_.base_lr / 4 + +anchors = [ + [(68, 69), (154, 91), (143, 162)], # P3/8 + [(242, 160), (189, 287), (391, 207)], # P4/16 + [(353, 337), (539, 341), (443, 432)] # P5/32 +] + +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(220, 20, 60)]) + +train_cfg = dict( + max_epochs=max_epochs, val_begin=20, val_interval=save_epoch_intervals) + +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors), + loss_cls=dict(loss_weight=0.5 * + (num_classes / 80 * 3 / _base_.num_det_layers)))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=_base_.dataset_type, + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json') +test_evaluator = val_evaluator + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=5, + save_best='auto'), + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=10)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov6_s_syncbn_fast_1xb32-100e_cat.py b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov6_s_syncbn_fast_1xb32-100e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..67d5638aae7532efb60bd608f2a976d8991503b8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov6_s_syncbn_fast_1xb32-100e_cat.py @@ -0,0 +1,85 @@ +_base_ = '../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +max_epochs = 100 +data_root = './data/cat/' + +work_dir = './work_dirs/yolov6_s_syncbn_fast_1xb32-100e_cat' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth' # noqa + +train_batch_size_per_gpu = 32 +train_num_workers = 4 # 
train_num_workers = nGPU x 4 + +save_epoch_intervals = 2 + +# base_lr_default * (your_bs / default_bs) +base_lr = _base_.base_lr / 8 + +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(220, 20, 60)]) + +train_cfg = dict( + max_epochs=max_epochs, + val_begin=20, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - _base_.num_last_epochs, 1)]) + +model = dict( + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict( + initial_assigner=dict(num_classes=num_classes), + assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=_base_.dataset_type, + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json') +test_evaluator = val_evaluator + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=5, + save_best='auto'), + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=10)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - _base_.num_last_epochs, + switch_pipeline=_base_.train_pipeline_stage2) +] diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov7_tiny_syncbn_fast_1xb32-100e_cat.py b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov7_tiny_syncbn_fast_1xb32-100e_cat.py new file mode 100644 index 0000000000000000000000000000000000000000..fff59cb3d31f002724b11674bb8c1550220be503 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/custom_dataset/yolov7_tiny_syncbn_fast_1xb32-100e_cat.py @@ -0,0 +1,78 @@ +_base_ = '../yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py' + +max_epochs = 100 +data_root = './data/cat/' + +work_dir = './work_dirs/yolov7_tiny_syncbn_fast_1xb32-100e_cat' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth' # noqa + +train_batch_size_per_gpu = 32 +train_num_workers = 4 # train_num_workers = nGPU x 4 + +save_epoch_intervals = 2 + +# base_lr_default * (your_bs / default_bs) +base_lr = 0.01 / 4 + +anchors = [ + [(68, 69), (154, 91), (143, 162)], # P3/8 + [(242, 160), (189, 287), (391, 207)], # P4/16 + [(353, 337), (539, 341), (443, 432)] # P5/32 +] + +class_name = ('cat', ) +num_classes = len(class_name) +metainfo = dict(classes=class_name, palette=[(220, 20, 60)]) + +train_cfg = dict( + max_epochs=max_epochs, + val_begin=20, + val_interval=save_epoch_intervals, + dynamic_intervals=[(max_epochs - 10, 1)]) + +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors), + loss_cls=dict(loss_weight=0.5 * + (num_classes / 80 * 3 / 
_base_.num_det_layers)))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=5, + dataset=dict( + type=_base_.dataset_type, + data_root=data_root, + metainfo=metainfo, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file='annotations/trainval.json', + data_prefix=dict(img='images/'))) + +test_dataloader = val_dataloader + +val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json') +test_evaluator = val_evaluator + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=2, + save_best='auto'), + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=10)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/README.md b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..eb7ddd580fb4e2872e54b9eade49a25b83211159 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/README.md @@ -0,0 +1,3 @@ +Tips: 这是 MMYOLO 应用范例的配置文件,请结合 [基于 MMYOLO 的频高图实时目标检测 benchmark](/docs/zh_cn/recommended_topics/application_examples/ionogram_detection.md) 来使用。 + +Tips: This is the config file of the MMYOLO application examples. Please use it in combination with [A Benchmark for Ionogram Detection Based on MMYOLO](/docs/en/recommended_topics/application_examples/ionogram_detection.md). diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_l_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_l_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..f1829eebf93e0dd8480819ef7710b94c2f3c24f5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_l_fast_1xb32-100e_ionogram.py @@ -0,0 +1,107 @@ +_base_ = 'mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py' + +# ======================== Modified parameters ====================== +# -----data related----- +data_root = './Iono4311/' +train_ann_file = 'annotations/train.json' +train_data_prefix = 'train_images/' +val_ann_file = 'annotations/val.json' +val_data_prefix = 'val_images/' +test_ann_file = 'annotations/test.json' +test_data_prefix = 'test_images/' + +class_name = ('E', 'Es-l', 'Es-c', 'F1', 'F2', 'Spread-F') +num_classes = len(class_name) +metainfo = dict( + classes=class_name, + palette=[(250, 165, 30), (120, 69, 125), (53, 125, 34), (0, 11, 123), + (130, 20, 12), (120, 121, 80)]) + +train_batch_size_per_gpu = 32 +train_num_workers = 8 +val_batch_size_per_gpu = train_batch_size_per_gpu + +# Config of batch shapes. Only on val. 
+batch_shapes_cfg = dict(batch_size=val_batch_size_per_gpu) + +# -----train val related----- +load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth' # noqa + +# default hooks +save_epoch_intervals = 10 +max_epochs = 100 +max_keep_ckpts = 1 + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=1.0e-5, by_epoch=False, begin=0, + end=300), + dict( + # use cosine lr from 20 to 100 epoch + type='CosineAnnealingLR', + eta_min=_base_.base_lr * 0.05, + begin=max_epochs // 5, + end=max_epochs, + T_max=max_epochs * 4 // 5, + by_epoch=True, + convert_to_iter_based=True), +] + +# train_cfg +val_interval = 2 +val_begin = 20 + +tta_model = None +tta_pipeline = None + +visualizer = dict( + vis_backends=[dict(type='LocalVisBackend'), + dict(type='WandbVisBackend')]) + +# ===================== Unmodified in most cases ================== +model = dict( + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict(assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix))) + +val_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file)) + +test_dataloader = dict( + batch_size=val_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(img=test_data_prefix), + ann_file=test_ann_file)) + +default_hooks = dict( + checkpoint=dict( + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto')) + +val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = dict(ann_file=data_root + test_ann_file) + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=val_begin, + val_interval=val_interval) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_s_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_s_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..49b284b09a0c5605d59c2e332f9894aadaf3d483 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_s_fast_1xb32-100e_ionogram.py @@ -0,0 +1,83 @@ +_base_ = './rtmdet_l_fast_1xb32-100e_ionogram.py' + +load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth' # noqa + +# ======================= Modified parameters ===================== +deepen_factor = 0.33 +widen_factor = 0.5 +img_scale = _base_.img_scale + +# ratio range for random resize +random_resize_ratio_range = (0.5, 2.0) +# Number of cached images in mosaic +mosaic_max_cached_images = 40 +# Number of cached images in mixup +mixup_max_cached_images = 20 + +# ===================== Unmodified in most cases ================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + 
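+# Training pipeline with cached Mosaic and MixUp; the cache sizes come from
+# the `mosaic_max_cached_images` / `mixup_max_cached_images` values above.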
+train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=mosaic_max_cached_images, + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=random_resize_ratio_range, # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + max_cached_images=mixup_max_cached_images), + dict(type='mmdet.PackDetInputs') +] + +train_pipeline_stage2 = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='mmdet.RandomResize', + scale=img_scale, + ratio_range=random_resize_ratio_range, # note + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0002, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, + switch_pipeline=train_pipeline_stage2) +] diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_tiny_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_tiny_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..acdaa0756c5df4e3aff3391651ab737c6632da44 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/rtmdet/rtmdet_tiny_fast_1xb32-100e_ionogram.py @@ -0,0 +1,62 @@ +_base_ = './rtmdet_s_fast_1xb32-100e_ionogram.py' + +# ======================= Modified parameters ====================== +deepen_factor = 0.167 +widen_factor = 0.375 +img_scale = _base_.img_scale + +load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth' # noqa + +# learning rate +param_scheduler = [ + dict( + type='LinearLR', start_factor=1.0e-5, by_epoch=False, begin=0, + end=300), + dict( + # use cosine lr from 50 to 100 epoch + type='CosineAnnealingLR', + eta_min=_base_.base_lr * 0.05, + begin=_base_.max_epochs // 2, + end=_base_.max_epochs, + T_max=_base_.max_epochs // 2, + by_epoch=True, + convert_to_iter_based=True), +] + +# =======================Unmodified in most cases================== +model = dict( + backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) + +train_pipeline = [ + dict(type='LoadImageFromFile', backend_args=_base_.backend_args), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=img_scale, + use_cached=True, + max_cached_images=20, # note + 
random_pop=False, # note + pad_val=114.0), + dict( + type='mmdet.RandomResize', + # img_scale is (width, height) + scale=(img_scale[0] * 2, img_scale[1] * 2), + ratio_range=(0.5, 2.0), + resize_type='mmdet.Resize', + keep_ratio=True), + dict(type='mmdet.RandomCrop', crop_size=img_scale), + dict(type='mmdet.YOLOXHSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), + dict( + type='YOLOv5MixUp', + use_cached=True, + random_pop=False, + max_cached_images=10, + prob=0.5), + dict(type='mmdet.PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_m-v61_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_m-v61_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..737aeae9abeaee0e0024f04f4d7bfbeb9d8798a6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_m-v61_fast_1xb32-100e_ionogram.py @@ -0,0 +1,95 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# Copied from '../../yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py' +deepen_factor = 0.67 +widen_factor = 0.75 +lr_factor = 0.1 +affine_scale = 0.9 +loss_cls_weight = 0.3 +loss_obj_weight = 0.7 +mixup_prob = 0.1 + +# -----data related----- +train_batch_size_per_gpu = 32 + +# -----train val related----- +# Scale lr for SGD +base_lr = _base_.base_lr * train_batch_size_per_gpu \ + / _base_.train_batch_size_per_gpu +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth' # noqa + +# ===================== Unmodified in most cases ================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale + +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + ), + bbox_head=dict( + head_module=dict(widen_factor=widen_factor), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + +pre_transform = _base_.pre_transform +albu_train_transforms = _base_.albu_train_transforms + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +# enable mixup +train_pipeline = [ + *pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=mixup_prob, + pre_transform=[*pre_transform, *mosaic_affine_pipeline]), + dict( + type='mmdet.Albu', + transforms=albu_train_transforms, + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap={ + 'img': 'image', + 'gt_bboxes': 'bboxes' + }), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + 
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + dataset=dict(dataset=dict(pipeline=train_pipeline))) + +val_dataloader = dict(batch_size=train_batch_size_per_gpu) +test_dataloader = dict(batch_size=train_batch_size_per_gpu) +optim_wrapper = dict(optimizer=dict(lr=base_lr)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb32-100e_ionogram_mosaic.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb32-100e_ionogram_mosaic.py new file mode 100644 index 0000000000000000000000000000000000000000..1252ebfca09eb21b1b96d4424c2329855e1b1a40 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb32-100e_ionogram_mosaic.py @@ -0,0 +1,35 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# -----data related----- +train_batch_size_per_gpu = 32 + +# -----train val related----- +base_lr = _base_.base_lr * train_batch_size_per_gpu \ + / _base_.train_batch_size_per_gpu / 2 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ]), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +# ===================== Unmodified in most cases ================== +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + dataset=dict(dataset=dict(pipeline=train_pipeline))) + +val_dataloader = dict(batch_size=train_batch_size_per_gpu) + +test_dataloader = dict(batch_size=train_batch_size_per_gpu) + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..dbe1305d835e8e0a435433deb36ff0d7ce9ec77d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram.py @@ -0,0 +1,108 @@ +_base_ = 'mmyolo::yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py' + +# ======================= Modified parameters ===================== +# -----data related----- +data_root = './Iono4311/' +train_ann_file = 'annotations/train.json' +train_data_prefix = 'train_images/' +val_ann_file = 'annotations/val.json' +val_data_prefix = 'val_images/' +test_ann_file = 'annotations/test.json' +test_data_prefix = 'test_images/' +class_name = ('E', 'Es-l', 'Es-c', 'F1', 'F2', 'Spread-F') +num_classes = len(class_name) +metainfo = dict( + classes=class_name, + palette=[(250, 165, 30), (120, 69, 125), (53, 125, 34), (0, 11, 123), + (130, 20, 12), (120, 121, 80)]) +# Batch size of a single GPU during training +train_batch_size_per_gpu = 96 +# Worker to pre-fetch data for each single GPU during training +train_num_workers = 8 + +# -----model related----- +# Basic size of multi-scale prior box +anchors = [[[8, 6], [24, 4], [19, 9]], [[22, 19], [17, 49], [29, 45]], + [[44, 66], [96, 76], [126, 
59]]] + +# -----train val related----- +# base_lr_default * (your_bs / default_bs (8x16)) for SGD +base_lr = _base_.base_lr * train_batch_size_per_gpu / (8 * 16) +max_epochs = 100 +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa + +# default_hooks +save_epoch_intervals = 10 +logger_interval = 20 +max_keep_ckpts = 1 + +# train_cfg +val_interval = 2 +val_begin = 20 + +tta_model = None +tta_pipeline = None + +visualizer = dict( + vis_backends=[dict(type='LocalVisBackend'), + dict(type='WandbVisBackend')]) + +# ===================== Unmodified in most cases ================== +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors), + loss_cls=dict(loss_weight=0.5 * + (num_classes / 80 * 3 / _base_.num_det_layers)))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=1, + dataset=dict( + type=_base_.dataset_type, + data_root=data_root, + metainfo=metainfo, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline))) + +val_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=val_ann_file, + data_prefix=dict(img=val_data_prefix))) + +test_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=test_ann_file, + data_prefix=dict(img=test_data_prefix))) + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + save_param_scheduler=None, # for yolov5 + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto'), + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=logger_interval)) + +val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = dict(ann_file=data_root + test_ann_file) + +train_cfg = dict( + max_epochs=max_epochs, val_begin=val_begin, val_interval=val_interval) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_aug0.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_aug0.py new file mode 100644 index 0000000000000000000000000000000000000000..39ffb6ba1e110b0ee59136414939164d8e0fe1b5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_aug0.py @@ -0,0 +1,21 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# -----train val related----- +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='YOLOv5KeepRatioResize', scale=(640, 640)), + dict( + type='LetterResize', + scale=(640, 640), + allow_scale_up=False, + pad_val=dict(img=114)), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param')) +] + +# ===================== Unmodified in most cases ================== +train_dataloader = 
dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine.py new file mode 100644 index 0000000000000000000000000000000000000000..10c114cbcc1f754d46139157eece5d59666d6649 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine.py @@ -0,0 +1,29 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# -----train val related----- +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ]), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(0.5, 1.5), + border=(-320, -320), + border_val=(114, 114, 114)), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +# ===================== Unmodified in most cases ================== +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine_albu_hsv.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine_albu_hsv.py new file mode 100644 index 0000000000000000000000000000000000000000..df8f6a2c561a67b275abca3cc5ca3763f1527d72 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-100e_ionogram_mosaic_affine_albu_hsv.py @@ -0,0 +1,44 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# -----train val related----- +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Mosaic', + img_scale=(640, 640), + pad_val=114.0, + pre_transform=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ]), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(0.5, 1.5), + border=(-320, -320), + border_val=(114, 114, 114)), + dict( + type='mmdet.Albu', + transforms=[ + dict(type='Blur', p=0.01), + dict(type='MedianBlur', p=0.01), + dict(type='ToGray', p=0.01), + dict(type='CLAHE', p=0.01) + ], + bbox_params=dict( + type='BboxParams', + format='pascal_voc', + label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), + keymap=dict(img='image', gt_bboxes='bboxes')), + dict(type='YOLOv5HSVRandomAug'), + # dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape')) +] + +# ===================== Unmodified in most cases ================== +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-200e_ionogram_pre0.py 
b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-200e_ionogram_pre0.py new file mode 100644 index 0000000000000000000000000000000000000000..9f62fac92864c1de2d52d3382452a84a16dfe6f8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov5/yolov5_s-v61_fast_1xb96-200e_ionogram_pre0.py @@ -0,0 +1,17 @@ +_base_ = './yolov5_s-v61_fast_1xb96-100e_ionogram.py' + +# ======================= Modified parameters ===================== +# -----train val related----- +base_lr = _base_.base_lr * 4 +max_epochs = 200 +load_from = None +logger_interval = 50 + +train_cfg = dict(max_epochs=max_epochs, ) + +# ===================== Unmodified in most cases ================== +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=logger_interval)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_l_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_l_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5918d828ddd82ca349a307cb015b7fc29f68f1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_l_fast_1xb32-100e_ionogram.py @@ -0,0 +1,29 @@ +_base_ = './yolov6_m_fast_1xb32-100e_ionogram.py' + +# ======================= Modified parameters ======================= +# -----model related----- +deepen_factor = 1 +widen_factor = 1 + +# -----train val related----- +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_l_syncbn_fast_8xb32-300e_coco/yolov6_l_syncbn_fast_8xb32-300e_coco_20221109_183156-91e3c447.pth' # noqa + +# ====================== Unmodified in most cases =================== +model = dict( + backbone=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. / 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + act_cfg=dict(type='SiLU', inplace=True)), + neck=dict( + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=1. 
/ 2, + block_cfg=dict( + type='ConvWrapper', + norm_cfg=dict(type='BN', momentum=0.03, eps=0.001)), + block_act_cfg=dict(type='SiLU', inplace=True)), + bbox_head=dict(head_module=dict(widen_factor=widen_factor))) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_m_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_m_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..00ea8ff055efd5b2094c723cb52118f51d3ce1c6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_m_fast_1xb32-100e_ionogram.py @@ -0,0 +1,63 @@ +_base_ = './yolov6_s_fast_1xb32-100e_ionogram.py' + +# ======================= Modified parameters ======================= +# -----model related----- +# The scaling factor that controls the depth of the network structure +deepen_factor = 0.6 +# The scaling factor that controls the width of the network structure +widen_factor = 0.75 + +# -----train val related----- +affine_scale = 0.9 # YOLOv5RandomAffine scaling ratio +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_m_syncbn_fast_8xb32-300e_coco/yolov6_m_syncbn_fast_8xb32-300e_coco_20221109_182658-85bda3f4.pth' # noqa + +# ====================== Unmodified in most cases =================== +model = dict( + backbone=dict( + type='YOLOv6CSPBep', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + hidden_ratio=2. / 3, + block_cfg=dict(type='RepVGGBlock'), + act_cfg=dict(type='ReLU', inplace=True)), + neck=dict( + type='YOLOv6CSPRepPAFPN', + deepen_factor=deepen_factor, + widen_factor=widen_factor, + block_cfg=dict(type='RepVGGBlock'), + hidden_ratio=2. / 3, + block_act_cfg=dict(type='ReLU', inplace=True)), + bbox_head=dict( + type='YOLOv6Head', head_module=dict(widen_factor=widen_factor))) + +mosaic_affine_pipeline = [ + dict( + type='Mosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), + # img_scale is (width, height) + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)) +] + +train_pipeline = [ + *_base_.pre_transform, *mosaic_affine_pipeline, + dict( + type='YOLOv5MixUp', + prob=0.1, + pre_transform=[*_base_.pre_transform, *mosaic_affine_pipeline]), + dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..c9748b408d7a899d96c2852e1f5a9d726187957c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-100e_ionogram.py @@ -0,0 +1,108 @@ +_base_ = 'mmyolo::yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py' + +# ======================= Modified parameters ===================== +# -----data related----- +data_root = './Iono4311/' +train_ann_file = 'annotations/train.json' +train_data_prefix = 
'train_images/' +val_ann_file = 'annotations/val.json' +val_data_prefix = 'val_images/' +test_ann_file = 'annotations/test.json' +test_data_prefix = 'test_images/' + +class_name = ('E', 'Es-l', 'Es-c', 'F1', 'F2', 'Spread-F') +num_classes = len(class_name) +metainfo = dict( + classes=class_name, + palette=[(250, 165, 30), (120, 69, 125), (53, 125, 34), (0, 11, 123), + (130, 20, 12), (120, 121, 80)]) + +train_batch_size_per_gpu = 32 +train_num_workers = 8 + +tta_model = None +tta_pipeline = None + +# -----train val related----- +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth' # noqa +# base_lr_default * (your_bs 32 / default_bs (8 x 32)) +base_lr = _base_.base_lr * train_batch_size_per_gpu / (8 * 32) +max_epochs = 100 +save_epoch_intervals = 10 +val_begin = 20 +max_keep_ckpts = 1 +log_interval = 50 +visualizer = dict( + vis_backends=[dict(type='LocalVisBackend'), + dict(type='WandbVisBackend')]) + +# ==================== Unmodified in most cases =================== +train_cfg = dict( + max_epochs=max_epochs, + val_begin=val_begin, + val_interval=save_epoch_intervals, + dynamic_intervals=None) + +model = dict( + bbox_head=dict(head_module=dict(num_classes=num_classes)), + train_cfg=dict( + initial_assigner=dict(num_classes=num_classes), + assigner=dict(num_classes=num_classes))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + _delete_=True, + type='RepeatDataset', + times=1, + dataset=dict( + type=_base_.dataset_type, + data_root=data_root, + metainfo=metainfo, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=_base_.train_pipeline))) + +val_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=val_ann_file, + data_prefix=dict(img=val_data_prefix))) + +test_dataloader = dict( + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=test_ann_file, + data_prefix=dict(img=test_data_prefix))) + +val_evaluator = dict(ann_file=data_root + val_data_prefix) +test_evaluator = dict(ann_file=data_root + test_data_prefix) + +optim_wrapper = dict(optimizer=dict(lr=base_lr)) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=save_epoch_intervals, + max_keep_ckpts=max_keep_ckpts, + save_best='auto'), + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=log_interval)) + +custom_hooks = [ + dict( + type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict( + type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - _base_.num_last_epochs, + switch_pipeline=_base_.train_pipeline_stage2) +] diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-200e_ionogram_pre0.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-200e_ionogram_pre0.py new file mode 100644 index 0000000000000000000000000000000000000000..cc38730f971664bb07edff2a8497e25d4376531f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov6/yolov6_s_fast_1xb32-200e_ionogram_pre0.py @@ -0,0 +1,17 @@ +_base_ = './yolov6_s_fast_1xb32-100e_ionogram.py' + +# ======================= Modified parameters ===================== +base_lr = _base_.base_lr * 4 
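+# Training from scratch for 200 epochs (load_from = None below), so the +# learning rate inherited from the fine-tuned baseline is scaled up 4x.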
+optim_wrapper = dict(optimizer=dict(lr=base_lr)) +max_epochs = 200 +load_from = None + +# ==================== Unmodified in most cases =================== +train_cfg = dict( + max_epochs=max_epochs, + val_begin=20, +) + +default_hooks = dict( + param_scheduler=dict(max_epochs=max_epochs), + logger=dict(type='LoggerHook', interval=50)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_l_fast_1xb16-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_l_fast_1xb16-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..44d58c1f33a12b945c4fafb6f01b521a2e8c6e54 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_l_fast_1xb16-100e_ionogram.py @@ -0,0 +1,98 @@ +_base_ = 'mmyolo::yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py' + +# ======================== Modified parameters ====================== +# -----data related----- +data_root = './Iono4311/' +train_ann_file = 'annotations/train.json' +train_data_prefix = 'train_images/' +val_ann_file = 'annotations/val.json' +val_data_prefix = 'val_images/' +test_ann_file = 'annotations/test.json' +test_data_prefix = 'test_images/' + +class_name = ('E', 'Es-l', 'Es-c', 'F1', 'F2', 'Spread-F') +num_classes = len(class_name) +metainfo = dict( + classes=class_name, + palette=[(250, 165, 30), (120, 69, 125), (53, 125, 34), (0, 11, 123), + (130, 20, 12), (120, 121, 80)]) + +train_batch_size_per_gpu = 16 +train_num_workers = 8 + +# -----model related----- +anchors = [[[14, 14], [35, 6], [32, 18]], [[32, 45], [28, 97], [52, 80]], + [[71, 122], [185, 94], [164, 134]]] + +# -----train val related----- +# base_lr_default * (your_bs 32 / default_bs (8 x 16)) +base_lr = _base_.base_lr * train_batch_size_per_gpu / (8 * 16) +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco/yolov7_l_syncbn_fast_8x16b-300e_coco_20221123_023601-8113c0eb.pth' # noqa + +# default hooks +save_epoch_intervals = 10 +max_epochs = 100 +max_keep_ckpts = 1 + +# train_cfg +val_interval = 2 +val_begin = 20 + +tta_model = None +tta_pipeline = None + +visualizer = dict( + vis_backends=[dict(type='LocalVisBackend'), + dict(type='WandbVisBackend')]) + +# ===================== Unmodified in most cases ================== +model = dict( + bbox_head=dict( + head_module=dict(num_classes=num_classes), + prior_generator=dict(base_sizes=anchors), + loss_cls=dict(loss_weight=_base_.loss_cls_weight * + (num_classes / 80 * 3 / _base_.num_det_layers)))) + +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + ann_file=train_ann_file, + data_prefix=dict(img=train_data_prefix))) + +val_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file)) + +test_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(img=test_data_prefix), + ann_file=test_ann_file)) + +optim_wrapper = dict( + optimizer=dict(lr=base_lr, batch_size_per_gpu=train_batch_size_per_gpu)) + +default_hooks = dict( + param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict( + interval=save_epoch_intervals, max_keep_ckpts=max_keep_ckpts)) + 
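+# Only the annotation paths are overridden below; the evaluator metric type +# is inherited from the base config.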
+val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = dict(ann_file=data_root + test_ann_file) + +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=val_begin, + val_interval=val_interval) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_tiny_fast_1xb16-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_tiny_fast_1xb16-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..9c2d63ddeefaa50d3e180c558b1eec2e45180d46 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_tiny_fast_1xb16-100e_ionogram.py @@ -0,0 +1,101 @@ +_base_ = './yolov7_l_fast_1xb16-100e_ionogram.py' + +# ======================== Modified parameters ======================= +# pre-train +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth' # noqa + +# -----model related----- +# Data augmentation +max_translate_ratio = 0.1 # YOLOv5RandomAffine +scaling_ratio_range = (0.5, 1.6) # YOLOv5RandomAffine +mixup_prob = 0.05 # YOLOv5MixUp +randchoice_mosaic_prob = [0.8, 0.2] +mixup_alpha = 8.0 # YOLOv5MixUp +mixup_beta = 8.0 # YOLOv5MixUp + +# -----train val related----- +loss_cls_weight = 0.5 +loss_obj_weight = 1.0 + +lr_factor = 0.01 # Learning rate scaling factor + +# ====================== Unmodified in most cases ==================== +num_classes = _base_.num_classes +num_det_layers = _base_.num_det_layers +img_scale = _base_.img_scale +pre_transform = _base_.pre_transform +model = dict( + backbone=dict( + arch='Tiny', act_cfg=dict(type='LeakyReLU', negative_slope=0.1)), + neck=dict( + is_tiny_version=True, + in_channels=[128, 256, 512], + out_channels=[64, 128, 256], + block_cfg=dict( + _delete_=True, type='TinyDownSampleBlock', middle_ratio=0.25), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + use_repconv_outs=False), + bbox_head=dict( + head_module=dict(in_channels=[128, 256, 512]), + loss_cls=dict(loss_weight=loss_cls_weight * + (num_classes / 80 * 3 / num_det_layers)), + loss_obj=dict(loss_weight=loss_obj_weight * + ((img_scale[0] / 640)**2 * 3 / num_det_layers)))) + +mosiac4_pipeline = [ + dict( + type='Mosaic', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # change + scaling_ratio_range=scaling_ratio_range, # change + # img_scale is (width, height) + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +mosiac9_pipeline = [ + dict( + type='Mosaic9', + img_scale=img_scale, + pad_val=114.0, + pre_transform=pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + max_translate_ratio=max_translate_ratio, # change + scaling_ratio_range=scaling_ratio_range, # change + border=(-img_scale[0] // 2, -img_scale[1] // 2), + border_val=(114, 114, 114)), +] + +randchoice_mosaic_pipeline = dict( + type='RandomChoice', + transforms=[mosiac4_pipeline, mosiac9_pipeline], + prob=randchoice_mosaic_prob) + +train_pipeline = [ + *pre_transform, + randchoice_mosaic_pipeline, + dict( + type='YOLOv5MixUp', + alpha=mixup_alpha, + beta=mixup_beta, + prob=mixup_prob, # change + pre_transform=[*pre_transform, randchoice_mosaic_pipeline]), + 
dict(type='YOLOv5HSVRandomAug'), + dict(type='mmdet.RandomFlip', prob=0.5), + dict( + type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction')) +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor)) diff --git a/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_x_fast_1xb16-100e_ionogram.py b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_x_fast_1xb16-100e_ionogram.py new file mode 100644 index 0000000000000000000000000000000000000000..606232a6619278e9583276ee89a9c4c340e3e8db --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/projects/misc/ionogram_detection/yolov7/yolov7_x_fast_1xb16-100e_ionogram.py @@ -0,0 +1,19 @@ +_base_ = './yolov7_l_fast_1xb16-100e_ionogram.py' + +# ======================== Modified parameters ======================= +load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco/yolov7_x_syncbn_fast_8x16b-300e_coco_20221124_215331-ef949a68.pth' # noqa + +# ===================== Unmodified in most cases ================== +model = dict( + backbone=dict(arch='X'), + neck=dict( + in_channels=[640, 1280, 1280], + out_channels=[160, 320, 640], + block_cfg=dict( + type='ELANBlock', + middle_ratio=0.4, + block_ratio=0.4, + num_blocks=3, + num_convs_in_block=2), + use_repconv_outs=False), + bbox_head=dict(head_module=dict(in_channels=[320, 640, 1280]))) diff --git a/models/YOLO-World/third_party/mmyolo/pytest.ini b/models/YOLO-World/third_party/mmyolo/pytest.ini new file mode 100644 index 0000000000000000000000000000000000000000..9796e871e70c7c67345b1d6bcf708c0c82377a98 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +addopts = --xdoctest --xdoctest-style=auto +norecursedirs = .git ignore build __pycache__ data docker docs .eggs + +filterwarnings= default + ignore:.*No cfgstr given in Cacher constructor or call.*:Warning + ignore:.*Define the __nice__ method for.*:Warning diff --git a/models/YOLO-World/third_party/mmyolo/requirements.txt b/models/YOLO-World/third_party/mmyolo/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f50cbdc09d6389264f87e2aa1a576a81990e66a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements.txt @@ -0,0 +1,3 @@ +-r requirements/build.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/models/YOLO-World/third_party/mmyolo/requirements/albu.txt b/models/YOLO-World/third_party/mmyolo/requirements/albu.txt new file mode 100644 index 0000000000000000000000000000000000000000..2957391ba9d71f694c74257b42e194529c11879f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/albu.txt @@ -0,0 +1 @@ +albumentations --no-binary qudida,albumentations diff --git a/models/YOLO-World/third_party/mmyolo/requirements/build.txt b/models/YOLO-World/third_party/mmyolo/requirements/build.txt new file mode 100644 index 0000000000000000000000000000000000000000..c96c69aae6a2dfd7d8329707c7a7fe77e0b91f99 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/build.txt @@ -0,0 +1,3 @@ +# These must be installed before building mmyolo +cython +numpy diff --git a/models/YOLO-World/third_party/mmyolo/requirements/docs.txt b/models/YOLO-World/third_party/mmyolo/requirements/docs.txt new file mode 100644 index 
0000000000000000000000000000000000000000..a93a3766c87ffad4c802c323f1a43578d7c8fd92 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/docs.txt @@ -0,0 +1,13 @@ +docutils==0.16.0 +mmcv>=2.0.0rc4,<=2.1.0 +mmdet>=3.0.0 +mmengine>=0.7.1 +myst-parser +-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx-copybutton +sphinx_markdown_tables +sphinx_rtd_theme==0.5.2 +torch +torchvision +urllib3<2.0.0 diff --git a/models/YOLO-World/third_party/mmyolo/requirements/mminstall.txt b/models/YOLO-World/third_party/mmyolo/requirements/mminstall.txt new file mode 100644 index 0000000000000000000000000000000000000000..843738f7caa0cd20a2c27c07381e960f0923624a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/mminstall.txt @@ -0,0 +1,3 @@ +mmcv>=2.0.0rc4,<=2.1.0 +mmdet>=3.0.0 +mmengine>=0.7.1 diff --git a/models/YOLO-World/third_party/mmyolo/requirements/mmpose.txt b/models/YOLO-World/third_party/mmyolo/requirements/mmpose.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e4726e68452ccd045940fa9df95681d9d44c2cf --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/mmpose.txt @@ -0,0 +1 @@ +mmpose>=1.0.0 diff --git a/models/YOLO-World/third_party/mmyolo/requirements/mmrotate.txt b/models/YOLO-World/third_party/mmyolo/requirements/mmrotate.txt new file mode 100644 index 0000000000000000000000000000000000000000..15f05d38e76ce50f84535abcbe40109aadd1e1cb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/mmrotate.txt @@ -0,0 +1 @@ +mmrotate>=1.0.0rc1 diff --git a/models/YOLO-World/third_party/mmyolo/requirements/runtime.txt b/models/YOLO-World/third_party/mmyolo/requirements/runtime.txt new file mode 100644 index 0000000000000000000000000000000000000000..794a9cab5748caf8059c4a610e7782bef321841f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/runtime.txt @@ -0,0 +1,2 @@ +numpy +prettytable diff --git a/models/YOLO-World/third_party/mmyolo/requirements/sahi.txt b/models/YOLO-World/third_party/mmyolo/requirements/sahi.txt new file mode 100644 index 0000000000000000000000000000000000000000..0e7b7b842fdc0ead64ce78615c99daa7420bddb9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/sahi.txt @@ -0,0 +1 @@ +sahi>=0.11.4 diff --git a/models/YOLO-World/third_party/mmyolo/requirements/tests.txt b/models/YOLO-World/third_party/mmyolo/requirements/tests.txt new file mode 100644 index 0000000000000000000000000000000000000000..285b3f3969a2137639e694b3b1652166bc43b177 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/requirements/tests.txt @@ -0,0 +1,17 @@ +flake8 +interrogate +isort==4.3.21 +# Note: used for kwarray.group_items, this may be ported to mmcv in the future. 
+kwarray +memory_profiler +mmcls>=1.0.0rc4 +mmpose>=1.0.0 +mmrazor>=1.0.0rc2 +mmrotate>=1.0.0rc1 +parameterized +protobuf<=3.20.1 +psutil +pytest +ubelt +xdoctest>=0.10.0 +yapf diff --git a/models/YOLO-World/third_party/mmyolo/setup.cfg b/models/YOLO-World/third_party/mmyolo/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d30673d0f6242fef3381b4171f9ec208b7f7bc3d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/setup.cfg @@ -0,0 +1,21 @@ +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = setuptools +known_first_party = mmyolo +known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,parameterized,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml,mmengine,mmdet,mmdeploy +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[yapf] +BASED_ON_STYLE = pep8 +BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true +SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true + +# ignore-words-list needs to be lowercase format. For example, if we want to +# ignore word "BA", then we need to append "ba" to ignore-words-list rather +# than "BA" +[codespell] +skip = *.ipynb +quiet-level = 3 +ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,tood,ba,warmup,elease,dota diff --git a/models/YOLO-World/third_party/mmyolo/setup.py b/models/YOLO-World/third_party/mmyolo/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..f37c89791fee95fb321d66a479f13420f64aa5b9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/setup.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import platform +import shutil +import sys +import warnings +from setuptools import find_packages, setup + +from torch.utils.cpp_extension import BuildExtension + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'mmyolo/version.py' + + +def get_version(): + with open(version_file) as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. 
+ + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath) as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + yield from parse_line(line) + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + if platform.system() == 'Windows': + # set `copy` mode here since symlink fails on Windows. 
+ mode = 'copy' + else: + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'demo', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmyolo', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmyolo', + version=get_version(), + description='OpenMMLab Toolbox of YOLO', + long_description=readme(), + long_description_content_type='text/markdown', + author='MMYOLO Contributors', + author_email='openmmlab@gmail.com', + keywords='computer vision, object detection', + url='https://github.com/open-mmlab/mmyolo', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + license='GPL License 3.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + }, + ext_modules=[], + cmdclass={'build_ext': BuildExtension}, + zip_safe=False) diff --git a/models/YOLO-World/third_party/mmyolo/tests/regression/mmyolo.yml b/models/YOLO-World/third_party/mmyolo/tests/regression/mmyolo.yml new file mode 100644 index 0000000000000000000000000000000000000000..55eaec38e1d7a7d3ef524928a1896c97f39633e4 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/regression/mmyolo.yml @@ -0,0 +1,81 @@ +globals: + codebase_dir: ../mmyolo + checkpoint_force_download: False + images: + input_img: &input_img ../mmyolo/demo/demo.jpg + test_img: &test_img ./tests/data/tiger.jpeg + metric_info: &metric_info + box AP: # named after metafile.Results.Metrics + metric_key: coco/bbox_mAP # eval OrderedDict key name + tolerance: 1 # metric ±n% + multi_value: 100 + convert_image: &convert_image + input_img: *input_img + test_img: *test_img + backend_test: &default_backend_test True + +onnxruntime: + pipeline_ort_static_fp32: &pipeline_ort_static_fp32 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmyolo/detection_onnxruntime_static.py + + pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmyolo/detection_onnxruntime_dynamic.py + +tensorrt: + pipeline_trt_static_fp32: 
&pipeline_trt_static_fp32_640x640 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmyolo/detection_tensorrt_static-640x640.py + + pipeline_trt_static_fp16: &pipeline_trt_static_fp16_640x640 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmyolo/detection_tensorrt-fp16_static-640x640.py + + pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32 + convert_image: *convert_image + backend_test: *default_backend_test + deploy_config: configs/mmyolo/detection_tensorrt_dynamic-192x192-960x960.py + + pipeline_trt_dynamic_fp16: &pipeline_trt_dynamic_fp16 + convert_image: *convert_image + backend_test: *default_backend_test + deploy_config: configs/mmyolo/detection_tensorrt-fp16_dynamic-64x64-1344x1344.py + +models: + - name: YOLOv5 + metafile: configs/yolov5/metafile.yml + model_configs: + - configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py + pipelines: + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_dynamic_fp16 + + - name: YOLOv6 + metafile: configs/yolov6/metafile.yml + model_configs: + - configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py + pipelines: + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_dynamic_fp16 + + - name: YOLOX + metafile: configs/yolox/metafile.yml + model_configs: + - configs/yolox/yolox_s_8xb8-300e_coco.py + pipelines: + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_dynamic_fp16 + + + - name: RTMDet + metafile: configs/rtmdet/metafile.yml + model_configs: + - configs/rtmdet/rtmdet_s_syncbn_8xb32-300e_coco.py + pipelines: + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_dynamic_fp16 diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_formatting.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..c75475dfcfb4e32f656a194d55fc162a165107b3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_formatting.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import unittest + +import numpy as np +from mmdet.structures import DetDataSample +from mmdet.structures.mask import BitmapMasks +from mmengine.structures import InstanceData, PixelData + +from mmyolo.datasets.transforms import PackDetInputs + + +class TestPackDetInputs(unittest.TestCase): + + def setUp(self): + """Setup the model and optimizer which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + data_prefix = osp.join(osp.dirname(__file__), '../../data') + img_path = osp.join(data_prefix, 'color.jpg') + rng = np.random.RandomState(0) + self.results1 = { + 'img_id': 1, + 'img_path': img_path, + 'ori_shape': (300, 400), + 'img_shape': (600, 800), + 'scale_factor': 2.0, + 'flip': False, + 'img': rng.rand(300, 400), + 'gt_seg_map': rng.rand(300, 400), + 'gt_masks': + BitmapMasks(rng.rand(3, 300, 400), height=300, width=400), + 'gt_bboxes_labels': rng.rand(3, ), + 'gt_ignore_flags': np.array([0, 0, 1], dtype=bool), + 'proposals': rng.rand(2, 4), + 'proposals_scores': rng.rand(2, ) + } + self.results2 = { + 'img_id': 1, + 'img_path': img_path, + 'ori_shape': (300, 400), + 'img_shape': (600, 800), + 'scale_factor': 2.0, + 'flip': False, + 'img': rng.rand(300, 400), + 'gt_seg_map': rng.rand(300, 400), + 'gt_masks': + BitmapMasks(rng.rand(3, 300, 400), height=300, width=400), + 'gt_bboxes_labels': rng.rand(3, ), + 'proposals': rng.rand(2, 4), + 'proposals_scores': rng.rand(2, ) + } + self.results3 = { + 'img_id': 1, + 'img_path': img_path, + 'ori_shape': (300, 400), + 'img_shape': (600, 800), + 'scale_factor': 2.0, + 'flip': False, + 'img': rng.rand(300, 400), + 'gt_seg_map': rng.rand(300, 400), + 'gt_masks': + BitmapMasks(rng.rand(3, 300, 400), height=300, width=400), + 'gt_panoptic_seg': rng.rand(1, 300, 400), + 'gt_bboxes_labels': rng.rand(3, ), + 'proposals': rng.rand(2, 4), + 'proposals_scores': rng.rand(2, ) + } + self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor', + 'flip') + + def test_transform(self): + transform = PackDetInputs(meta_keys=self.meta_keys) + results = transform(copy.deepcopy(self.results1)) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], DetDataSample) + self.assertIsInstance(results['data_samples'].gt_instances, + InstanceData) + self.assertIsInstance(results['data_samples'].ignored_instances, + InstanceData) + self.assertEqual(len(results['data_samples'].gt_instances), 2) + self.assertEqual(len(results['data_samples'].ignored_instances), 1) + self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData) + + def test_transform_without_ignore(self): + transform = PackDetInputs(meta_keys=self.meta_keys) + results = transform(copy.deepcopy(self.results2)) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], DetDataSample) + self.assertIsInstance(results['data_samples'].gt_instances, + InstanceData) + self.assertIsInstance(results['data_samples'].ignored_instances, + InstanceData) + self.assertEqual(len(results['data_samples'].gt_instances), 3) + self.assertEqual(len(results['data_samples'].ignored_instances), 0) + self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData) + + def test_transform_with_panoptic_seg(self): + transform = PackDetInputs(meta_keys=self.meta_keys) + results = transform(copy.deepcopy(self.results3)) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], DetDataSample) + self.assertIsInstance(results['data_samples'].gt_instances, + InstanceData) + self.assertIsInstance(results['data_samples'].ignored_instances, + InstanceData) + self.assertEqual(len(results['data_samples'].gt_instances), 3) + self.assertEqual(len(results['data_samples'].ignored_instances), 0) + self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData) + self.assertIsInstance(results['data_samples'].gt_panoptic_seg, + 
PixelData) + + def test_repr(self): + transform = PackDetInputs(meta_keys=self.meta_keys) + self.assertEqual( + repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})') diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_mix_img_transforms.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_mix_img_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9bf20e39572c946e1b66bdf87626a0c243ac29 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_mix_img_transforms.py @@ -0,0 +1,416 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import unittest + +import numpy as np +import torch +from mmdet.structures.bbox import HorizontalBoxes +from mmdet.structures.mask import BitmapMasks, PolygonMasks + +from mmyolo.datasets import YOLOv5CocoDataset +from mmyolo.datasets.transforms import Mosaic, Mosaic9, YOLOv5MixUp, YOLOXMixUp +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestMosaic(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ] + + self.dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'dataset': + self.dataset + } + + def test_transform(self): + # test assertion for invalid img_scale + with self.assertRaises(AssertionError): + transform = Mosaic(img_scale=640) + + # test assertion for invalid probability + with self.assertRaises(AssertionError): + transform = Mosaic(prob=1.5) + + # test assertion for invalid max_cached_images + with self.assertRaises(AssertionError): + transform = Mosaic(use_cached=True, max_cached_images=1) + + transform = Mosaic( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_no_gt(self): + self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32) + self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64) + self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool) + transform = Mosaic( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = transform(copy.deepcopy(self.results)) + self.assertIsInstance(results, dict) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue( + results['gt_bboxes_labels'].shape[0] == results['gt_bboxes']. 
+ shape[0] == results['gt_ignore_flags'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_box_list(self): + transform = Mosaic( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + results = transform(results) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_mask(self): + rng = np.random.RandomState(0) + pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True) + ] + + dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'gt_masks': + PolygonMasks.random(num_masks=3, height=224, width=224, rng=rng), + 'dataset': + dataset + } + transform = Mosaic(img_scale=(12, 10), pre_transform=pre_transform) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + results = transform(results) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestMosaic9(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + rng = np.random.RandomState(0) + self.pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ] + + self.dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'gt_masks': + BitmapMasks(rng.rand(3, 224, 224), height=224, width=224), + 'dataset': + self.dataset + } + + def test_transform(self): + # test assertion for invalid img_scale + with self.assertRaises(AssertionError): + transform = Mosaic9(img_scale=640) + + # test assertion for invalid probability + with self.assertRaises(AssertionError): + transform = Mosaic9(prob=1.5) + + # test assertion for invalid max_cached_images + with self.assertRaises(AssertionError): + transform = Mosaic9(use_cached=True, max_cached_images=1) + + transform = Mosaic9( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_no_gt(self): + self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32) + self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64) + self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool) + transform = Mosaic9( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = transform(copy.deepcopy(self.results)) + self.assertIsInstance(results, dict) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue( + results['gt_bboxes_labels'].shape[0] == results['gt_bboxes']. + shape[0] == results['gt_ignore_flags'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_box_list(self): + transform = Mosaic9( + img_scale=(12, 10), pre_transform=self.pre_transform) + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + results = transform(results) + self.assertTrue(results['img'].shape[:2] == (20, 24)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestYOLOv5MixUp(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ] + self.dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + + self.results = { + 'img': + np.random.random((288, 512, 3)), + 'img_shape': (288, 512), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'dataset': + self.dataset + } + + def test_transform(self): + transform = YOLOv5MixUp(pre_transform=self.pre_transform) + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (288, 512)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + # test assertion for invalid max_cached_images + with self.assertRaises(AssertionError): + transform = YOLOv5MixUp(use_cached=True, max_cached_images=1) + + def test_transform_with_box_list(self): + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + + transform = YOLOv5MixUp(pre_transform=self.pre_transform) + results = transform(results) + self.assertTrue(results['img'].shape[:2] == (288, 512)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_mask(self): + rng = np.random.RandomState(0) + pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True) + ] + dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + + results = { + 'img': + np.random.random((288, 512, 3)), + 'img_shape': (288, 512), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'gt_masks': + PolygonMasks.random(num_masks=3, height=288, width=512, rng=rng), + 'dataset': + dataset + } + + transform = YOLOv5MixUp(pre_transform=pre_transform) + results = transform(copy.deepcopy(results)) + self.assertTrue(results['img'].shape[:2] == (288, 512)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestYOLOXMixUp(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + rng = np.random.RandomState(0) + self.pre_transform = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True) + ] + self.dataset = YOLOv5CocoDataset( + data_prefix=dict( + img=osp.join(osp.dirname(__file__), '../../data')), + ann_file=osp.join( + osp.dirname(__file__), '../../data/coco_sample_color.json'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + 'gt_masks': + BitmapMasks(rng.rand(3, 224, 224), height=224, width=224), + 'dataset': + self.dataset + } + + def test_transform(self): + # test assertion for invalid img_scale + with self.assertRaises(AssertionError): + transform = YOLOXMixUp(img_scale=640) + + # test assertion for invalid max_cached_images + with self.assertRaises(AssertionError): + transform = YOLOXMixUp(use_cached=True, max_cached_images=1) + + transform = YOLOXMixUp( + img_scale=(10, 12), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=self.pre_transform) + + # self.results['mix_results'] = [copy.deepcopy(self.results)] + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_boxlist(self): + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + + transform = YOLOXMixUp( + img_scale=(10, 12), + ratio_range=(0.8, 1.6), + pad_val=114.0, + pre_transform=self.pre_transform) + results = transform(results) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_transforms.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b7ea49f875582a343829ec7142ed09a61fe51e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_transforms/test_transforms.py @@ -0,0 +1,493 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import unittest + +import mmcv +import numpy as np +import torch +from mmdet.structures.bbox import HorizontalBoxes +from mmdet.structures.mask import BitmapMasks, PolygonMasks + +from mmyolo.datasets.transforms import (LetterResize, LoadAnnotations, + YOLOv5HSVRandomAug, + YOLOv5KeepRatioResize, + YOLOv5RandomAffine) +from mmyolo.datasets.transforms.transforms import (PPYOLOERandomCrop, + PPYOLOERandomDistort, + YOLOv5CopyPaste) + + +class TestLetterResize(unittest.TestCase): + + def setUp(self): + """Set up the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + rng = np.random.RandomState(0) + self.data_info1 = dict( + img=np.random.random((300, 400, 3)), + gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32), + batch_shape=np.array([192, 672], dtype=np.int64), + gt_masks=PolygonMasks.random(1, height=300, width=400, rng=rng)) + self.data_info2 = dict( + img=np.random.random((300, 400, 3)), + gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32)) + self.data_info3 = dict( + img=np.random.random((300, 400, 3)), + batch_shape=np.array([192, 672], dtype=np.int64)) + self.data_info4 = dict(img=np.random.random((300, 400, 3))) + + def test_letter_resize(self): + # Test allow_scale_up + transform = LetterResize(scale=(640, 640), allow_scale_up=False) + results = transform(copy.deepcopy(self.data_info1)) + self.assertEqual(results['img_shape'], (192, 672, 3)) + self.assertTrue( + (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all()) + self.assertTrue((results['batch_shape'] == np.array([192, 672])).all()) + self.assertTrue((results['pad_param'] == np.array([0., 0., 208., + 208.])).all()) + self.assertTrue( + (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all()) + + # Test pad_val + transform = LetterResize(scale=(640, 640), pad_val=dict(img=144)) + results = transform(copy.deepcopy(self.data_info1)) + self.assertEqual(results['img_shape'], (192, 672, 3)) + self.assertTrue( + (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all()) + self.assertTrue((results['batch_shape'] == np.array([192, 672])).all()) + self.assertTrue((results['pad_param'] == np.array([0., 0., 208., + 208.])).all()) + self.assertTrue( + (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all()) + + # Test use_mini_pad + transform = LetterResize(scale=(640, 640), use_mini_pad=True) + results = transform(copy.deepcopy(self.data_info1)) + self.assertEqual(results['img_shape'], (192, 256, 3)) + self.assertTrue((results['gt_bboxes'] == np.array([[0., 0., 96., + 96.]])).all()) + self.assertTrue((results['batch_shape'] == np.array([192, 672])).all()) + self.assertTrue((results['pad_param'] == np.array([0., 0., 0., + 0.])).all()) + self.assertTrue( + (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all()) + + # Test stretch_only + transform = LetterResize(scale=(640, 640), stretch_only=True) + results = transform(copy.deepcopy(self.data_info1)) + self.assertEqual(results['img_shape'], (192, 672, 3)) + self.assertTrue((results['gt_bboxes'] == np.array( + [[0., 0., 251.99998474121094, 96.]])).all()) + self.assertTrue((results['batch_shape'] == np.array([192, 672])).all()) + self.assertTrue((results['pad_param'] == np.array([0., 0., 0., + 0.])).all()) + + # Test + transform = LetterResize(scale=(640, 640), pad_val=dict(img=144)) + for _ in range(5): + input_h, input_w = np.random.randint(100, 700), np.random.randint( + 100, 700) + output_h, output_w = np.random.randint(100, + 700), np.random.randint( + 100, 700) + data_info = dict( + img=np.random.random((input_h, input_w, 3)), + gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32), + batch_shape=np.array([output_h, output_w], dtype=np.int64), + gt_masks=PolygonMasks( + [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]], + height=input_h, + width=input_w)) + results = transform(data_info) + self.assertEqual(results['img_shape'], (output_h, output_w, 3)) + self.assertTrue( + (results['batch_shape'] == np.array([output_h, + output_w])).all()) + + # Test without batchshape + 
transform = LetterResize(scale=(640, 640), pad_val=dict(img=144)) + for _ in range(5): + input_h, input_w = np.random.randint(100, 700), np.random.randint( + 100, 700) + data_info = dict( + img=np.random.random((input_h, input_w, 3)), + gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32), + gt_masks=PolygonMasks( + [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]], + height=input_h, + width=input_w)) + results = transform(data_info) + self.assertEqual(results['img_shape'], (640, 640, 3)) + + # TODO: Testing the existence of multiple scale_factor and pad_param + transform = [ + YOLOv5KeepRatioResize(scale=(32, 32)), + LetterResize(scale=(64, 68), pad_val=dict(img=144)) + ] + for _ in range(5): + input_h, input_w = np.random.randint(100, 700), np.random.randint( + 100, 700) + output_h, output_w = np.random.randint(100, + 700), np.random.randint( + 100, 700) + data_info = dict( + img=np.random.random((input_h, input_w, 3)), + gt_bboxes=np.array([[0, 0, 5, 5]], dtype=np.float32), + batch_shape=np.array([output_h, output_w], dtype=np.int64)) + for t in transform: + data_info = t(data_info) + # because of the "math.round" operation, + # it is unable to strictly restore the original input shape + # we just validate the correctness of scale_factor and pad_param + self.assertIn('scale_factor', data_info) + self.assertIn('pad_param', data_info) + pad_param = data_info['pad_param'].reshape(-1, 2).sum( + 1) # (top, b, l, r) -> (h, w) + scale_factor = np.asarray(data_info['scale_factor']) # (w, h) + + max_long_edge = max((32, 32)) + max_short_edge = min((32, 32)) + scale_factor_keepratio = min( + max_long_edge / max(input_h, input_w), + max_short_edge / min(input_h, input_w)) + validate_shape = np.asarray( + (int(input_h * scale_factor_keepratio), + int(input_w * scale_factor_keepratio))) + scale_factor_keepratio = np.asarray( + (validate_shape[1] / input_w, validate_shape[0] / input_h)) + + scale_factor_letter = ((np.asarray( + (output_h, output_w)) - pad_param) / validate_shape)[::-1] + self.assertTrue(data_info['img_shape'][:2] == (output_h, output_w)) + self.assertTrue((scale_factor == (scale_factor_keepratio * + scale_factor_letter)).all()) + + +class TestYOLOv5KeepRatioResize(unittest.TestCase): + + def setUp(self): + """Set up the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + rng = np.random.RandomState(0) + self.data_info1 = dict( + img=np.random.random((300, 400, 3)), + gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32), + gt_masks=PolygonMasks.random( + num_masks=1, height=300, width=400, rng=rng)) + self.data_info2 = dict(img=np.random.random((300, 400, 3))) + + def test_yolov5_keep_ratio_resize(self): + # test assertion for invalid keep_ratio + with self.assertRaises(AssertionError): + transform = YOLOv5KeepRatioResize(scale=(640, 640)) + transform.keep_ratio = False + results = transform(copy.deepcopy(self.data_info1)) + + # Test with gt_bboxes + transform = YOLOv5KeepRatioResize(scale=(640, 640)) + results = transform(copy.deepcopy(self.data_info1)) + self.assertTrue(transform.keep_ratio, True) + self.assertEqual(results['img_shape'], (480, 640)) + self.assertTrue( + (results['gt_bboxes'] == np.array([[0., 0., 240., 240.]])).all()) + self.assertTrue((np.array(results['scale_factor'], + dtype=np.float32) == 1.6).all()) + + # Test only img + transform = YOLOv5KeepRatioResize(scale=(640, 640)) + results = transform(copy.deepcopy(self.data_info2)) + self.assertEqual(results['img_shape'], (480, 640)) + self.assertTrue((np.array(results['scale_factor'], + dtype=np.float32) == 1.6).all()) + + +class TestYOLOv5HSVRandomAug(unittest.TestCase): + + def setUp(self): + """Set up the data info which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.data_info = dict( + img=mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), + 'color')) + + def test_yolov5_hsv_random_aug(self): + # Test with gt_bboxes + transform = YOLOv5HSVRandomAug( + hue_delta=0.015, saturation_delta=0.7, value_delta=0.4) + results = transform(copy.deepcopy(self.data_info)) + self.assertTrue( + results['img'].shape[:2] == self.data_info['img'].shape[:2]) + + +class TestLoadAnnotations(unittest.TestCase): + + def setUp(self): + """Set up the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + data_prefix = osp.join(osp.dirname(__file__), '../../data') + seg_map = osp.join(data_prefix, 'gray.jpg') + self.results = { + 'ori_shape': (300, 400), + 'seg_map_path': + seg_map, + 'instances': [{ + 'bbox': [0, 0, 10, 20], + 'bbox_label': 1, + 'mask': [[0, 0, 0, 20, 10, 20, 10, 0]], + 'ignore_flag': 0 + }, { + 'bbox': [10, 10, 110, 120], + 'bbox_label': 2, + 'mask': [[10, 10, 110, 10, 110, 120, 110, 10]], + 'ignore_flag': 0 + }, { + 'bbox': [50, 50, 60, 80], + 'bbox_label': 2, + 'mask': [[50, 50, 60, 50, 60, 80, 50, 80]], + 'ignore_flag': 1 + }] + } + + def test_load_bboxes(self): + transform = LoadAnnotations( + with_bbox=True, + with_label=False, + with_seg=False, + with_mask=False, + box_type=None) + results = transform(copy.deepcopy(self.results)) + self.assertIn('gt_bboxes', results) + self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 10, 20], + [10, 10, 110, + 120]])).all()) + self.assertEqual(results['gt_bboxes'].dtype, np.float32) + self.assertTrue( + (results['gt_ignore_flags'] == np.array([False, False])).all()) + self.assertEqual(results['gt_ignore_flags'].dtype, bool) + + # test empty instance + results = transform({}) + self.assertIn('gt_bboxes', results) + self.assertTrue(results['gt_bboxes'].shape == (0, 4)) + self.assertIn('gt_ignore_flags', results) + self.assertTrue(results['gt_ignore_flags'].shape == (0, )) + + def test_load_labels(self): + transform = LoadAnnotations( + with_bbox=False, + with_label=True, + with_seg=False, + with_mask=False, + ) + results = transform(copy.deepcopy(self.results)) + self.assertIn('gt_bboxes_labels', results) + self.assertTrue((results['gt_bboxes_labels'] == np.array([1, + 2])).all()) + self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64) + + # test empty instance + results = transform({}) + self.assertIn('gt_bboxes_labels', results) + self.assertTrue(results['gt_bboxes_labels'].shape == (0, )) + + +class TestYOLOv5RandomAffine(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + } + + def test_transform(self): + # test assertion for invalid translate_ratio + with self.assertRaises(AssertionError): + transform = YOLOv5RandomAffine(max_translate_ratio=1.5) + + # test assertion for invalid scaling_ratio_range + with self.assertRaises(AssertionError): + transform = YOLOv5RandomAffine(scaling_ratio_range=(1.5, 0.5)) + + with self.assertRaises(AssertionError): + transform = YOLOv5RandomAffine(scaling_ratio_range=(0, 0.5)) + + transform = YOLOv5RandomAffine() + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_boxlist(self): + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + + transform = YOLOv5RandomAffine() + results = transform(copy.deepcopy(results)) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestPPYOLOERandomCrop(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + } + + def test_transform(self): + transform = PPYOLOERandomCrop() + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_boxlist(self): + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + + transform = PPYOLOERandomCrop() + results = transform(copy.deepcopy(results)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestPPYOLOERandomDistort(unittest.TestCase): + + def setUp(self): + """Setup the data info which are used in every test method. 
+ + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.results = { + 'img': + np.random.random((224, 224, 3)), + 'img_shape': (224, 224), + 'gt_bboxes_labels': + np.array([1, 2, 3], dtype=np.int64), + 'gt_bboxes': + np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]], + dtype=np.float32), + 'gt_ignore_flags': + np.array([0, 0, 1], dtype=bool), + } + + def test_transform(self): + # test assertion for invalid prob + with self.assertRaises(AssertionError): + transform = PPYOLOERandomDistort( + hue_cfg=dict(min=-18, max=18, prob=1.5)) + + # test assertion for invalid num_distort_func + with self.assertRaises(AssertionError): + transform = PPYOLOERandomDistort(num_distort_func=5) + + transform = PPYOLOERandomDistort() + results = transform(copy.deepcopy(self.results)) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == np.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + def test_transform_with_boxlist(self): + results = copy.deepcopy(self.results) + results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes']) + + transform = PPYOLOERandomDistort() + results = transform(copy.deepcopy(results)) + self.assertTrue(results['img'].shape[:2] == (224, 224)) + self.assertTrue(results['gt_bboxes_labels'].shape[0] == + results['gt_bboxes'].shape[0]) + self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64) + self.assertTrue(results['gt_bboxes'].dtype == torch.float32) + self.assertTrue(results['gt_ignore_flags'].dtype == bool) + + +class TestYOLOv5CopyPaste(unittest.TestCase): + + def setUp(self): + """Set up the data info which are used in every test method. + + TestCase calls functions in this order: setUp() -> testMethod() -> + tearDown() -> cleanUp() + """ + self.data_info = dict( + img=np.random.random((300, 400, 3)), + gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32), + gt_masks=PolygonMasks( + [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]], + height=300, + width=400)) + + def test_transform(self): + # test transform + transform = YOLOv5CopyPaste(prob=1.0) + results = transform(copy.deepcopy(self.data_info)) + self.assertTrue(len(results['gt_bboxes']) == 2) + self.assertTrue(len(results['gt_masks']) == 2) + + rng = np.random.RandomState(0) + # test with bitmap + with self.assertRaises(AssertionError): + results = transform( + dict( + img=np.random.random((300, 400, 3)), + gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32), + gt_masks=BitmapMasks( + rng.rand(1, 300, 400), height=300, width=400))) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_utils.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dc7b9022650fd49ed4283858bb030852191260c8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_utils.py @@ -0,0 +1,138 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest + +import numpy as np +import torch +from mmdet.structures import DetDataSample +from mmdet.structures.bbox import HorizontalBoxes +from mmengine.structures import InstanceData + +from mmyolo.datasets import BatchShapePolicy, yolov5_collate + + +def _rand_bboxes(rng, num_boxes, w, h): + cx, cy, bw, bh = rng.rand(num_boxes, 4).T + + tl_x = ((cx * w) - (w * bw / 2)).clip(0, w) + tl_y = ((cy * h) - (h * bh / 2)).clip(0, h) + br_x = ((cx * w) + (w * bw / 2)).clip(0, w) + br_y = ((cy * h) + (h * bh / 2)).clip(0, h) + + bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T + return bboxes + + +class TestYOLOv5Collate(unittest.TestCase): + + def test_yolov5_collate(self): + rng = np.random.RandomState(0) + + inputs = torch.randn((3, 10, 10)) + data_samples = DetDataSample() + gt_instances = InstanceData() + bboxes = _rand_bboxes(rng, 4, 6, 8) + gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32) + labels = rng.randint(1, 2, size=len(bboxes)) + gt_instances.labels = torch.LongTensor(labels) + data_samples.gt_instances = gt_instances + + out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)]) + self.assertIsInstance(out, dict) + self.assertTrue(out['inputs'].shape == (1, 3, 10, 10)) + self.assertTrue(out['data_samples'], dict) + self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6)) + + out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)] * + 2) + self.assertIsInstance(out, dict) + self.assertTrue(out['inputs'].shape == (2, 3, 10, 10)) + self.assertTrue(out['data_samples'], dict) + self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6)) + + def test_yolov5_collate_with_multi_scale(self): + rng = np.random.RandomState(0) + + inputs = torch.randn((3, 10, 10)) + data_samples = DetDataSample() + gt_instances = InstanceData() + bboxes = _rand_bboxes(rng, 4, 6, 8) + gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32) + labels = rng.randint(1, 2, size=len(bboxes)) + gt_instances.labels = torch.LongTensor(labels) + data_samples.gt_instances = gt_instances + + out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)], + use_ms_training=True) + self.assertIsInstance(out, dict) + self.assertTrue(out['inputs'][0].shape == (3, 10, 10)) + self.assertTrue(out['data_samples'], dict) + self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6)) + self.assertIsInstance(out['inputs'], list) + self.assertIsInstance(out['data_samples']['bboxes_labels'], + torch.Tensor) + + out = yolov5_collate( + [dict(inputs=inputs, data_samples=data_samples)] * 2, + use_ms_training=True) + self.assertIsInstance(out, dict) + self.assertTrue(out['inputs'][0].shape == (3, 10, 10)) + self.assertTrue(out['data_samples'], dict) + self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6)) + self.assertIsInstance(out['inputs'], list) + self.assertIsInstance(out['data_samples']['bboxes_labels'], + torch.Tensor) + + +class TestBatchShapePolicy(unittest.TestCase): + + def test_batch_shape_policy(self): + src_data_infos = [{ + 'height': 20, + 'width': 100, + }, { + 'height': 11, + 'width': 100, + }, { + 'height': 21, + 'width': 100, + }, { + 'height': 30, + 'width': 100, + }, { + 'height': 10, + 'width': 100, + }] + + expected_data_infos = [{ + 'height': 10, + 'width': 100, + 'batch_shape': np.array([96, 672]) + }, { + 'height': 11, + 'width': 100, + 'batch_shape': np.array([96, 672]) + }, { + 'height': 20, + 'width': 100, + 'batch_shape': np.array([160, 672]) + }, { + 'height': 21, + 'width': 100, + 
'batch_shape': np.array([160, 672]) + }, { + 'height': 30, + 'width': 100, + 'batch_shape': np.array([224, 672]) + }] + + batch_shapes_policy = BatchShapePolicy(batch_size=2) + out_data_infos = batch_shapes_policy(src_data_infos) + + for i in range(5): + self.assertEqual( + (expected_data_infos[i]['height'], + expected_data_infos[i]['width']), + (out_data_infos[i]['height'], out_data_infos[i]['width'])) + self.assertTrue( + np.allclose(expected_data_infos[i]['batch_shape'], + out_data_infos[i]['batch_shape'])) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_coco.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..b7e1c9a43077e7e34f36b2ecda5b3235cfa9bd75 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_coco.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import unittest + +from mmyolo.datasets import YOLOv5CocoDataset + + +class TestYOLOv5CocoDataset(unittest.TestCase): + + def test_batch_shapes_cfg(self): + batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=2, + img_size=640, + size_divisor=32, + extra_pad_ratio=0.5) + + # test serialize_data=True + dataset = YOLOv5CocoDataset( + data_prefix=dict(img='imgs'), + ann_file='tests/data/coco_sample.json', + filter_cfg=dict(filter_empty_gt=False, min_size=0), + pipeline=[], + serialize_data=True, + batch_shapes_cfg=batch_shapes_cfg, + ) + + expected_img_ids = [3, 0, 2, 1] + expected_batch_shapes = [[512, 672], [512, 672], [672, 672], + [672, 672]] + for i, data in enumerate(dataset): + assert data['img_id'] == expected_img_ids[i] + assert data['batch_shape'].tolist() == expected_batch_shapes[i] + + # test serialize_data=True + dataset = YOLOv5CocoDataset( + data_prefix=dict(img='imgs'), + ann_file='tests/data/coco_sample.json', + filter_cfg=dict(filter_empty_gt=False, min_size=0), + pipeline=[], + serialize_data=False, + batch_shapes_cfg=batch_shapes_cfg, + ) + + expected_img_ids = [3, 0, 2, 1] + expected_batch_shapes = [[512, 672], [512, 672], [672, 672], + [672, 672]] + for i, data in enumerate(dataset): + assert data['img_id'] == expected_img_ids[i] + assert data['batch_shape'].tolist() == expected_batch_shapes[i] + + def test_prepare_data(self): + dataset = YOLOv5CocoDataset( + data_prefix=dict(img='imgs'), + ann_file='tests/data/coco_sample.json', + filter_cfg=dict(filter_empty_gt=False, min_size=0), + pipeline=[], + serialize_data=True, + batch_shapes_cfg=None, + ) + for data in dataset: + assert 'dataset' in data + + # test with test_mode = True + dataset = YOLOv5CocoDataset( + data_prefix=dict(img='imgs'), + ann_file='tests/data/coco_sample.json', + test_mode=True, + pipeline=[]) + + for data in dataset: + assert 'dataset' not in data diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_voc.py b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e9b989c8b390624a2c1996b8ca534a0b000b56 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_datasets/test_yolov5_voc.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
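The batch-shape expectations asserted in `TestBatchShapePolicy` above (and reused via `batch_shapes_cfg` in the dataset tests around this point) follow from sorting images by aspect ratio, grouping them into batches, and padding each batch to a common shape that is a multiple of `size_divisor`. The sketch below is illustrative only: the helper name `batch_shapes` and its simplified branching are assumptions, not the mmyolo implementation, but it reproduces the `[96, 672]` / `[160, 672]` / `[224, 672]` values the test checks.

```python
import numpy as np


def batch_shapes(heights, widths, batch_size=2, img_size=640,
                 size_divisor=32, extra_pad_ratio=0.5):
    # Sort images by aspect ratio so similarly shaped images share a batch.
    aspect_ratios = np.array(heights) / np.array(widths)  # h / w
    order = aspect_ratios.argsort()
    shapes = []
    for start in range(0, len(order), batch_size):
        batch = aspect_ratios[order[start:start + batch_size]]
        ar = batch.max()
        # Wider-than-tall batches keep full width and shrink height;
        # taller-than-wide batches do the opposite.
        hw = np.array([ar, 1.0]) if ar < 1 else np.array([1.0, 1.0 / ar])
        # Round each side up to the next multiple of size_divisor,
        # with a little extra padding headroom.
        shapes.append(
            np.ceil(hw * img_size / size_divisor + extra_pad_ratio).astype(int)
            * size_divisor)
    return shapes


# batch_shapes([10, 11, 20, 21, 30], [100] * 5)
# -> [array([ 96, 672]), array([160, 672]), array([224, 672])]
# matching the expected batch shapes asserted in the test above.
```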
+import unittest + +from mmengine.dataset import ConcatDataset + +from mmyolo.datasets import YOLOv5VOCDataset +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv5VocDataset(unittest.TestCase): + + def test_batch_shapes_cfg(self): + batch_shapes_cfg = dict( + type='BatchShapePolicy', + batch_size=2, + img_size=640, + size_divisor=32, + extra_pad_ratio=0.5) + + # test serialize_data=True + dataset = YOLOv5VOCDataset( + data_root='tests/data/VOCdevkit/', + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + test_mode=True, + pipeline=[], + batch_shapes_cfg=batch_shapes_cfg, + ) + + expected_img_ids = ['000001'] + expected_batch_shapes = [[672, 480]] + for i, data in enumerate(dataset): + assert data['img_id'] == expected_img_ids[i] + assert data['batch_shape'].tolist() == expected_batch_shapes[i] + + def test_prepare_data(self): + dataset = YOLOv5VOCDataset( + data_root='tests/data/VOCdevkit/', + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=False, min_size=0), + pipeline=[], + serialize_data=True, + batch_shapes_cfg=None, + ) + for data in dataset: + assert 'dataset' in data + + # test with test_mode = True + dataset = YOLOv5VOCDataset( + data_root='tests/data/VOCdevkit/', + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict( + filter_empty_gt=True, min_size=32, bbox_min_size=None), + pipeline=[], + test_mode=True, + batch_shapes_cfg=None) + + for data in dataset: + assert 'dataset' not in data + + def test_concat_dataset(self): + dataset = ConcatDataset( + datasets=[ + dict( + type='YOLOv5VOCDataset', + data_root='tests/data/VOCdevkit/', + ann_file='VOC2007/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2007/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]), + dict( + type='YOLOv5VOCDataset', + data_root='tests/data/VOCdevkit/', + ann_file='VOC2012/ImageSets/Main/trainval.txt', + data_prefix=dict(sub_data_root='VOC2012/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[]) + ], + ignore_keys='dataset_type') + + dataset.full_init() + self.assertEqual(len(dataset), 2) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_deploy/conftest.py b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..ed1bd3d88905e39928d9bc1c1803844d59f92ad9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/conftest.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest + + +@pytest.fixture(autouse=True) +def init_test(): + # init default scope + from mmdet.utils import register_all_modules as register_det + + from mmyolo.utils import register_all_modules as register_yolo + + register_yolo(True) + register_det(False) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_mmyolo_models.py b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_mmyolo_models.py new file mode 100644 index 0000000000000000000000000000000000000000..65394e539aa5b8dca39c17012aa8b805ca69bc39 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_mmyolo_models.py @@ -0,0 +1,165 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import random + +import numpy as np +import pytest +import torch +from mmengine import Config + +try: + import importlib + importlib.import_module('mmdeploy') +except ImportError: + pytest.skip('mmdeploy is not installed.', allow_module_level=True) + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Backend +from mmdeploy.utils.config_utils import register_codebase +from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs, + get_rewrite_outputs) + +try: + codebase = register_codebase('mmyolo') + import_codebase(codebase, ['mmyolo.deploy']) +except ImportError: + pytest.skip('mmyolo is not installed.', allow_module_level=True) + + +def seed_everything(seed=1029): + random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + torch.backends.cudnn.benchmark = False + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.enabled = False + + +def get_yolov5_head_model(): + """YOLOv5 Head Config.""" + test_cfg = Config( + dict( + multi_label=True, + nms_pre=30000, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=300)) + + from mmyolo.models.dense_heads import YOLOv5Head + head_module = dict( + type='YOLOv5HeadModule', + num_classes=4, + in_channels=[2, 4, 8], + featmap_strides=[8, 16, 32], + num_base_priors=1) + + model = YOLOv5Head(head_module, test_cfg=test_cfg) + + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_yolov5_head_predict_by_feat(backend_type: Backend): + """Test predict_by_feat rewrite of YOLOXHead.""" + check_backend(backend_type) + yolov5_head = get_yolov5_head_model() + yolov5_head.cpu().eval() + s = 256 + batch_img_metas = [{ + 'scale_factor': (1.0, 1.0), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3) + }] + output_names = ['dets', 'labels'] + deploy_cfg = Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmyolo', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=20, + pre_top_k=-1, + keep_top_k=10, + background_label_id=-1, + ), + module=['mmyolo.deploy']))) + seed_everything(1234) + cls_scores = [ + torch.rand(1, yolov5_head.num_classes * yolov5_head.num_base_priors, + 4 * pow(2, i), 4 * pow(2, i)) for i in range(3, 0, -1) + ] + seed_everything(5678) + bbox_preds = [ + torch.rand(1, 4 * yolov5_head.num_base_priors, 4 * pow(2, i), + 4 * pow(2, i)) for i in range(3, 0, -1) + ] + seed_everything(9101) + objectnesses = [ + torch.rand(1, 1 * yolov5_head.num_base_priors, 4 * pow(2, i), + 4 * pow(2, i)) for i in range(3, 0, -1) + ] + + # to get outputs of pytorch model + model_inputs = { + 'cls_scores': cls_scores, + 'bbox_preds': bbox_preds, + 'objectnesses': objectnesses, + 'batch_img_metas': batch_img_metas, + 'with_nms': True + } + model_outputs = get_model_outputs(yolov5_head, 'predict_by_feat', + model_inputs) + + # to get outputs of onnx model after rewrite + wrapped_model = WrapModel( + yolov5_head, + 'predict_by_feat', + batch_img_metas=batch_img_metas, + with_nms=True) + rewrite_inputs = { + 'cls_scores': cls_scores, + 'bbox_preds': bbox_preds, + 'objectnesses': objectnesses, + } + rewrite_outputs, is_backend_output = 
get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + if is_backend_output: + # hard code to make two tensors with the same shape + # rewrite and original codes applied different nms strategy + min_shape = min(model_outputs[0].bboxes.shape[0], + rewrite_outputs[0].shape[1], 5) + for i in range(len(model_outputs)): + rewrite_outputs[0][i, :min_shape, 0::2] = \ + rewrite_outputs[0][i, :min_shape, 0::2].clamp_(0, s) + rewrite_outputs[0][i, :min_shape, 1::2] = \ + rewrite_outputs[0][i, :min_shape, 1::2].clamp_(0, s) + assert np.allclose( + model_outputs[i].bboxes[:min_shape], + rewrite_outputs[0][i, :min_shape, :4], + rtol=1e-03, + atol=1e-05) + assert np.allclose( + model_outputs[i].scores[:min_shape], + rewrite_outputs[0][i, :min_shape, 4], + rtol=1e-03, + atol=1e-05) + assert np.allclose( + model_outputs[i].labels[:min_shape], + rewrite_outputs[1][i, :min_shape], + rtol=1e-03, + atol=1e-05) + else: + assert rewrite_outputs is not None diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_object_detection.py b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..b701e2557699de14d5e42679740e67706fa3bf6d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_deploy/test_object_detection.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from tempfile import NamedTemporaryFile, TemporaryDirectory + +import numpy as np +import pytest +import torch +from mmengine import Config + +try: + import importlib + importlib.import_module('mmdeploy') +except ImportError: + pytest.skip('mmdeploy is not installed.', allow_module_level=True) + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.apis import build_task_processor +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import load_config +from mmdeploy.utils.config_utils import register_codebase +from mmdeploy.utils.test import SwitchBackendWrapper + +try: + codebase = register_codebase('mmyolo') + import_codebase(codebase, ['mmyolo.deploy']) +except ImportError: + pytest.skip('mmyolo is not installed.', allow_module_level=True) + +model_cfg_path = 'tests/test_deploy/data/model.py' +model_cfg = load_config(model_cfg_path)[0] +model_cfg.test_dataloader.dataset.data_root = \ + 'tests/data' +model_cfg.test_dataloader.dataset.ann_file = 'coco_sample.json' +model_cfg.test_evaluator.ann_file = \ + 'tests/coco_sample.json' +deploy_cfg = Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict( + type='mmyolo', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, # for YOLOv3 + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + ), + module=['mmyolo.deploy']), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['dets', 'labels']))) +onnx_file = NamedTemporaryFile(suffix='.onnx').name +task_processor = None +img_shape = (32, 32) +img = np.random.rand(*img_shape, 3) + + +@pytest.fixture(autouse=True) +def init_task_processor(): + global task_processor + task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +@pytest.fixture +def backend_model(): + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': 
ORTWrapper}) + wrapper = SwitchBackendWrapper(ORTWrapper) + wrapper.set( + outputs={ + 'dets': torch.rand(1, 10, 5).sort(2).values, + 'labels': torch.randint(0, 10, (1, 10)) + }) + + yield task_processor.build_backend_model(['']) + + wrapper.recover() + + +def test_visualize(backend_model): + img_path = 'tests/data/color.jpg' + input_dict, _ = task_processor.create_input( + img_path, input_shape=img_shape) + results = backend_model.test_step(input_dict)[0] + with TemporaryDirectory() as dir: + filename = dir + 'tmp.jpg' + task_processor.visualize(img, results, filename, 'window') + assert os.path.exists(filename) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_downstream/test_mmrazor.py b/models/YOLO-World/third_party/mmyolo/tests/test_downstream/test_mmrazor.py new file mode 100644 index 0000000000000000000000000000000000000000..dc3090d263853e871fb70950be0acd845e19a238 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_downstream/test_mmrazor.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import pytest +from mmcls.models.backbones.base_backbone import BaseBackbone + +from mmyolo.testing import get_detector_cfg + + +@pytest.mark.parametrize('cfg_file', [ + 'razor/subnets/' + 'yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py', 'razor/subnets/' + 'rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py', 'razor/subnets/' + 'yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py' +]) +def test_razor_backbone_init(cfg_file): + model = get_detector_cfg(cfg_file) + model_cfg = copy.deepcopy(model.backbone) + from mmrazor.registry import MODELS + model = MODELS.build(model_cfg) + assert isinstance(model, BaseBackbone) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_switch_to_deploy_hook.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_switch_to_deploy_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..52d6e9f0583923feff08cf1cc6f41c8223503d88 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_switch_to_deploy_hook.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
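Before the `SwitchToDeployHook` test that follows, a minimal sketch of the re-parameterisation switch it exercises: walk the model and ask every re-parameterisable block (e.g. `RepVGGBlock`) to fold its training-time branches into a single inference branch. The free function below is a hypothetical illustration, not the hook's actual code.

```python
import torch.nn as nn


def switch_to_deploy(model: nn.Module) -> None:
    """Sketch of the deploy switch the hook test below asserts.

    Assumes re-parameterisable blocks expose a ``switch_to_deploy()``
    method that fuses their parallel branches and sets ``.deploy = True``,
    which is what the test checks on ``RepVGGBlock``.
    """
    for module in model.modules():
        if hasattr(module, 'switch_to_deploy'):
            module.switch_to_deploy()
```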
+from unittest import TestCase +from unittest.mock import Mock + +from mmyolo.engine.hooks import SwitchToDeployHook +from mmyolo.models import RepVGGBlock +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestSwitchToDeployHook(TestCase): + + def test(self): + + runner = Mock() + runner.model = RepVGGBlock(256, 256) + + hook = SwitchToDeployHook() + self.assertFalse(runner.model.deploy) + + # test after change mode + hook.before_test_epoch(runner) + self.assertTrue(runner.model.deploy) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolov5_param_scheduler_hook.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolov5_param_scheduler_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..1a527333023a179d95b8cd41b82fa5fd9842c0c6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolov5_param_scheduler_hook.py @@ -0,0 +1,124 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase +from unittest.mock import Mock + +import torch +from mmengine.config import Config +from mmengine.optim import build_optim_wrapper +from mmengine.runner import Runner +from torch import nn +from torch.utils.data import Dataset + +from mmyolo.engine.hooks import YOLOv5ParamSchedulerHook +from mmyolo.utils import register_all_modules + + +class ToyModel(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(2, 1) + + def forward(self, inputs, data_samples, mode='tensor'): + labels = torch.stack(data_samples) + inputs = torch.stack(inputs) + outputs = self.linear(inputs) + if mode == 'tensor': + return outputs + elif mode == 'loss': + loss = (labels - outputs).sum() + outputs = dict(loss=loss) + return outputs + else: + return outputs + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(12, 2) + label = torch.ones(12) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + return dict(inputs=self.data[index], data_sample=self.label[index]) + + +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=0.01, + momentum=0.937, + weight_decay=0.0005, + nesterov=True, + batch_size_per_gpu=1), + constructor='YOLOv5OptimizerConstructor') + +register_all_modules() + + +class TestYOLOv5ParamSchelerHook(TestCase): + + def test(self): + model = ToyModel() + train_dataloader = dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=3, + num_workers=0) + + runner = Mock() + runner.model = model + runner.optim_wrapper = build_optim_wrapper(model, optim_wrapper) + runner.cfg.train_dataloader = Config(train_dataloader) + runner.train_dataloader = Runner.build_dataloader(train_dataloader) + + hook = YOLOv5ParamSchedulerHook( + scheduler_type='linear', lr_factor=0.01, max_epochs=300) + + # test before train + runner.epoch = 0 + runner.iter = 0 + hook.before_train(runner) + + for group in runner.optim_wrapper.param_groups: + self.assertEqual(group['lr'], 0.01) + self.assertEqual(group['momentum'], 0.937) + + self.assertFalse(hook._warmup_end) + + # test after training 10 steps + for i in range(10): + runner.iter += 1 + hook.before_train_iter(runner, 0) + + for group_idx, group in enumerate(runner.optim_wrapper.param_groups): + if group_idx == 2: + self.assertEqual(round(group['lr'], 5), 0.0991) + self.assertEqual(group['momentum'], 
0.80137) + self.assertFalse(hook._warmup_end) + + # test after warm up + runner.iter = 1000 + hook.before_train_iter(runner, 0) + self.assertFalse(hook._warmup_end) + + for group in runner.optim_wrapper.param_groups: + self.assertEqual(group['lr'], 0.01) + self.assertEqual(group['momentum'], 0.937) + + runner.iter = 1001 + hook.before_train_iter(runner, 0) + self.assertTrue(hook._warmup_end) + + # test after train_epoch + hook.after_train_epoch(runner) + for group in runner.optim_wrapper.param_groups: + self.assertEqual(group['lr'], 0.01) + self.assertEqual(group['momentum'], 0.937) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe13413c4c2abf6369e3e439de63044dc68444c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase +from unittest.mock import Mock + +import torch +from mmengine.config import Config +from mmengine.runner import Runner +from torch.utils.data import Dataset + +from mmyolo.engine.hooks import YOLOXModeSwitchHook +from mmyolo.utils import register_all_modules + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(12, 2) + label = torch.ones(12) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + return dict(inputs=self.data[index], data_sample=self.label[index]) + + +pipeline1 = [ + dict(type='mmdet.Resize'), +] + +pipeline2 = [ + dict(type='mmdet.RandomFlip'), +] +register_all_modules() + + +class TestYOLOXModeSwitchHook(TestCase): + + def test(self): + train_dataloader = dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=3, + num_workers=0) + + runner = Mock() + runner.model = Mock() + runner.model.module = Mock() + + runner.model.bbox_head.use_bbox_aux = False + runner.cfg.train_dataloader = Config(train_dataloader) + runner.train_dataloader = Runner.build_dataloader(train_dataloader) + runner.train_dataloader.dataset.pipeline = pipeline1 + + hook = YOLOXModeSwitchHook( + num_last_epochs=15, new_train_pipeline=pipeline2) + + # test after change mode + runner.epoch = 284 + runner.max_epochs = 300 + hook.before_train_epoch(runner) + self.assertTrue(runner.model.bbox_head.use_bbox_aux) + self.assertEqual(runner.train_loop.dataloader.dataset.pipeline, + pipeline2) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
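A note on the optimizer-constructor tests that follow: they assert three parameter groups (conv/linear weights with weight decay, norm weights without, biases without) and a weight decay that scales with the effective batch size. The sketch below illustrates that grouping and scaling rule in plain PyTorch; the function name and the exact module filtering are assumptions, not the mmyolo constructor.

```python
import torch.nn as nn
from torch.optim import SGD


def yolov5_style_sgd(model: nn.Module, lr=0.01, momentum=0.9,
                     weight_decay=1e-4, batch_size_per_gpu=16,
                     world_size=1, base_total_batch_size=64):
    # Weight-decay scaling consistent with the tests below:
    # 16 imgs/GPU leaves the decay unchanged, 128 imgs/GPU doubles it.
    total = world_size * batch_size_per_gpu
    accumulate = max(round(base_total_batch_size / total), 1)
    scaled_decay = weight_decay * total * accumulate / base_total_batch_size

    conv_weights, norm_weights, biases = [], [], []
    for m in model.modules():
        if hasattr(m, 'bias') and isinstance(m.bias, nn.Parameter):
            biases.append(m.bias)
        if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            norm_weights.append(m.weight)
        elif hasattr(m, 'weight') and isinstance(m.weight, nn.Parameter):
            conv_weights.append(m.weight)

    return SGD(
        [
            dict(params=conv_weights, weight_decay=scaled_decay),  # group 0
            dict(params=norm_weights, weight_decay=0.0),           # group 1
            dict(params=biases, weight_decay=0.0),                 # group 2
        ],
        lr=lr,
        momentum=momentum)
```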
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov5_optim_constructor.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov5_optim_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..4830e5cd604f99bb40f783c4815e124a37f11c96 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov5_optim_constructor.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import copy +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.optim import build_optim_wrapper + +from mmyolo.engine import YOLOv5OptimizerConstructor +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.param1 = nn.Parameter(torch.ones(1)) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(4, 2, kernel_size=1) + self.bn = nn.BatchNorm2d(2) + + +class TestYOLOv5OptimizerConstructor(TestCase): + + def setUp(self): + self.model = ExampleModel() + self.base_lr = 0.01 + self.weight_decay = 0.0001 + self.optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=self.base_lr, + momentum=0.9, + weight_decay=self.weight_decay, + batch_size_per_gpu=16)) + + def test_init(self): + YOLOv5OptimizerConstructor(copy.deepcopy(self.optim_wrapper_cfg)) + YOLOv5OptimizerConstructor( + copy.deepcopy(self.optim_wrapper_cfg), + paramwise_cfg={'base_total_batch_size': 64}) + + # `paramwise_cfg` must include `base_total_batch_size` if not None. + with self.assertRaises(AssertionError): + YOLOv5OptimizerConstructor( + copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64}) + + def test_build(self): + optim_wrapper = YOLOv5OptimizerConstructor( + copy.deepcopy(self.optim_wrapper_cfg))( + self.model) + # test param_groups + assert len(optim_wrapper.optimizer.param_groups) == 3 + for i in range(3): + param_groups_i = optim_wrapper.optimizer.param_groups[i] + assert param_groups_i['lr'] == self.base_lr + if i == 0: + assert param_groups_i['weight_decay'] == self.weight_decay + else: + assert param_groups_i['weight_decay'] == 0 + + # test weight_decay linear scaling + optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg) + optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128 + optim_wrapper = YOLOv5OptimizerConstructor(optim_wrapper_cfg)( + self.model) + assert optim_wrapper.optimizer.param_groups[0][ + 'weight_decay'] == self.weight_decay * 2 + + # test without batch_size_per_gpu + optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg) + optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu') + optim_wrapper = dict( + optim_wrapper_cfg, constructor='YOLOv5OptimizerConstructor') + optim_wrapper = build_optim_wrapper(self.model, optim_wrapper) + assert optim_wrapper.optimizer.param_groups[0][ + 'weight_decay'] == self.weight_decay diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov7_optim_wrapper_constructor.py b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov7_optim_wrapper_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..a2f445bedd7b86ffaa00f4c74affa990eaeb663e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_engine/test_optimizers/test_yolov7_optim_wrapper_constructor.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ +import copy +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.optim import build_optim_wrapper + +from mmyolo.engine import YOLOv7OptimWrapperConstructor +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.param1 = nn.Parameter(torch.ones(1)) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(4, 2, kernel_size=1) + self.bn = nn.BatchNorm2d(2) + + +class TestYOLOv7OptimWrapperConstructor(TestCase): + + def setUp(self): + self.model = ExampleModel() + self.base_lr = 0.01 + self.weight_decay = 0.0001 + self.optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=dict( + type='SGD', + lr=self.base_lr, + momentum=0.9, + weight_decay=self.weight_decay, + batch_size_per_gpu=16)) + + def test_init(self): + YOLOv7OptimWrapperConstructor(copy.deepcopy(self.optim_wrapper_cfg)) + YOLOv7OptimWrapperConstructor( + copy.deepcopy(self.optim_wrapper_cfg), + paramwise_cfg={'base_total_batch_size': 64}) + + # `paramwise_cfg` must include `base_total_batch_size` if not None. + with self.assertRaises(AssertionError): + YOLOv7OptimWrapperConstructor( + copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64}) + + def test_build(self): + optim_wrapper = YOLOv7OptimWrapperConstructor( + copy.deepcopy(self.optim_wrapper_cfg))( + self.model) + # test param_groups + assert len(optim_wrapper.optimizer.param_groups) == 3 + for i in range(3): + param_groups_i = optim_wrapper.optimizer.param_groups[i] + assert param_groups_i['lr'] == self.base_lr + if i == 0: + assert param_groups_i['weight_decay'] == self.weight_decay + else: + assert param_groups_i['weight_decay'] == 0 + + # test weight_decay linear scaling + optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg) + optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128 + optim_wrapper = YOLOv7OptimWrapperConstructor(optim_wrapper_cfg)( + self.model) + assert optim_wrapper.optimizer.param_groups[0][ + 'weight_decay'] == self.weight_decay * 2 + + # test without batch_size_per_gpu + optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg) + optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu') + optim_wrapper = dict( + optim_wrapper_cfg, constructor='YOLOv7OptimWrapperConstructor') + optim_wrapper = build_optim_wrapper(self.model, optim_wrapper) + assert optim_wrapper.optimizer.param_groups[0][ + 'weight_decay'] == self.weight_decay diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
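Before the backbone tests that follow, note how their expected feature shapes are derived: a CSPDarknet-style P5 backbone emits five stages whose channel counts are the base widths (64, 128, 256, 512, 1024) scaled by `widen_factor`, with the spatial resolution halving at each successive output. The helper below is a hypothetical illustration of that arithmetic, not mmyolo code.

```python
def expected_p5_feat_shapes(img_size, widen_factor=1.0,
                            base_channels=(64, 128, 256, 512, 1024)):
    """Per-stage (channels, height, width) for a P5-style backbone sketch."""
    shapes = []
    for i, channels in enumerate(base_channels):
        stride = 2 ** (i + 1)  # resolution halves at each successive output
        shapes.append((int(channels * widen_factor),
                       img_size // stride, img_size // stride))
    return shapes


# expected_p5_feat_shapes(64, widen_factor=0.25)
# -> [(16, 32, 32), (32, 16, 16), (64, 8, 8), (128, 4, 4), (256, 2, 2)]
# which matches the shape assertions in the tests below.
```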
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_darknet.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_darknet.py new file mode 100644 index 0000000000000000000000000000000000000000..82dceb55f90558b8d6bec48254640e248e7ba772 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_darknet.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch +from parameterized import parameterized +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.models.backbones import (YOLOv5CSPDarknet, YOLOv8CSPDarknet, + YOLOXCSPDarknet) +from mmyolo.utils import register_all_modules +from .utils import check_norm_state, is_norm + +register_all_modules() + + +class TestCSPDarknet(TestCase): + + @parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ), + (YOLOv8CSPDarknet, )]) + def test_init(self, module_class): + # out_indices in range(len(arch_setting) + 1) + with pytest.raises(AssertionError): + module_class(out_indices=(6, )) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, len(arch_setting) + 1) + module_class(frozen_stages=6) + + @parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ), + (YOLOv8CSPDarknet, )]) + def test_forward(self, module_class): + # Test CSPDarknet with first stage frozen + frozen_stages = 1 + model = module_class(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.stem.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test CSPDarknet with norm_eval=True + model = module_class(norm_eval=True) + model.train() + + assert check_norm_state(model.modules(), False) + + # Test CSPDarknet-P5 forward with widen_factor=0.25 + model = module_class( + arch='P5', widen_factor=0.25, out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 16, 32, 32)) + assert feat[1].shape == torch.Size((1, 32, 16, 16)) + assert feat[2].shape == torch.Size((1, 64, 8, 8)) + assert feat[3].shape == torch.Size((1, 128, 4, 4)) + assert feat[4].shape == torch.Size((1, 256, 2, 2)) + + # Test CSPDarknet forward with dict(type='ReLU') + model = module_class( + widen_factor=0.125, + act_cfg=dict(type='ReLU'), + out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test CSPDarknet with BatchNorm forward + model = module_class(widen_factor=0.125, out_indices=range(0, 5)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == 
torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test CSPDarknet with Dropout Block + model = module_class(plugins=[ + dict( + cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3), + stages=(False, False, True, True)), + ]) + + assert len(model.stage1) == 2 + assert len(model.stage2) == 2 + assert len(model.stage3) == 3 # +DropBlock + assert len(model.stage4) == 4 # +SPPF+DropBlock + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 256, 32, 32)) + assert feat[1].shape == torch.Size((1, 512, 16, 16)) + assert feat[2].shape == torch.Size((1, 1024, 8, 8)) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_resnet.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0f3c473a8adbf5fa139bff50a7d39006657065 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_csp_resnet.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.models import PPYOLOECSPResNet +from mmyolo.utils import register_all_modules +from .utils import check_norm_state, is_norm + +register_all_modules() + + +class TestPPYOLOECSPResNet(TestCase): + + def test_init(self): + # out_indices in range(len(arch_setting) + 1) + with pytest.raises(AssertionError): + PPYOLOECSPResNet(out_indices=(6, )) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, len(arch_setting) + 1) + PPYOLOECSPResNet(frozen_stages=6) + + def test_forward(self): + # Test PPYOLOECSPResNet with first stage frozen + frozen_stages = 1 + model = PPYOLOECSPResNet(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.stem.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test PPYOLOECSPResNet with norm_eval=True + model = PPYOLOECSPResNet(norm_eval=True) + model.train() + + assert check_norm_state(model.modules(), False) + + # Test PPYOLOECSPResNet-P5 forward with widen_factor=0.25 + model = PPYOLOECSPResNet( + arch='P5', widen_factor=0.25, out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 16, 32, 32)) + assert feat[1].shape == torch.Size((1, 32, 16, 16)) + assert feat[2].shape == torch.Size((1, 64, 8, 8)) + assert feat[3].shape == torch.Size((1, 128, 4, 4)) + assert feat[4].shape == torch.Size((1, 256, 2, 2)) + + # Test PPYOLOECSPResNet forward with dict(type='ReLU') + model = PPYOLOECSPResNet( + widen_factor=0.125, + act_cfg=dict(type='ReLU'), + out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test PPYOLOECSPResNet with 
BatchNorm forward + model = PPYOLOECSPResNet(widen_factor=0.125, out_indices=range(0, 5)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test PPYOLOECSPResNet with BatchNorm forward + model = PPYOLOECSPResNet(plugins=[ + dict( + cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3), + stages=(False, False, True, True)), + ]) + + assert len(model.stage1) == 1 + assert len(model.stage2) == 1 + assert len(model.stage3) == 2 # +DropBlock + assert len(model.stage4) == 2 # +DropBlock + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 256, 32, 32)) + assert feat[1].shape == torch.Size((1, 512, 16, 16)) + assert feat[2].shape == torch.Size((1, 1024, 8, 8)) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_efficient_rep.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_efficient_rep.py new file mode 100644 index 0000000000000000000000000000000000000000..53af20294137b0d29a67e4f1946fe9fd79991f80 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_efficient_rep.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.models.backbones import YOLOv6CSPBep, YOLOv6EfficientRep +from mmyolo.utils import register_all_modules +from .utils import check_norm_state, is_norm + +register_all_modules() + + +class TestYOLOv6EfficientRep(TestCase): + + def test_init(self): + # out_indices in range(len(arch_setting) + 1) + with pytest.raises(AssertionError): + YOLOv6EfficientRep(out_indices=(6, )) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, len(arch_setting) + 1) + YOLOv6EfficientRep(frozen_stages=6) + + def test_YOLOv6EfficientRep_forward(self): + # Test YOLOv6EfficientRep with first stage frozen + frozen_stages = 1 + model = YOLOv6EfficientRep(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.stem.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test YOLOv6EfficientRep with norm_eval=True + model = YOLOv6EfficientRep(norm_eval=True) + model.train() + + assert check_norm_state(model.modules(), False) + + # Test YOLOv6EfficientRep-P5 forward with widen_factor=0.25 + model = YOLOv6EfficientRep( + arch='P5', widen_factor=0.25, out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 16, 32, 32)) + assert feat[1].shape == torch.Size((1, 32, 16, 16)) + assert feat[2].shape == torch.Size((1, 64, 8, 8)) + assert feat[3].shape == torch.Size((1, 128, 4, 4)) + assert feat[4].shape == torch.Size((1, 256, 2, 2)) + + # Test YOLOv6EfficientRep 
forward with dict(type='ReLU') + model = YOLOv6EfficientRep( + widen_factor=0.125, + act_cfg=dict(type='ReLU'), + out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test YOLOv6EfficientRep with BatchNorm forward + model = YOLOv6EfficientRep(widen_factor=0.125, out_indices=range(0, 5)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test YOLOv6EfficientRep with BatchNorm forward + model = YOLOv6EfficientRep(plugins=[ + dict( + cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3), + stages=(False, False, True, True)), + ]) + + assert len(model.stage1) == 1 + assert len(model.stage2) == 1 + assert len(model.stage3) == 2 # +DropBlock + assert len(model.stage4) == 3 # +SPPF+DropBlock + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 256, 32, 32)) + assert feat[1].shape == torch.Size((1, 512, 16, 16)) + assert feat[2].shape == torch.Size((1, 1024, 8, 8)) + + def test_YOLOv6CSPBep_forward(self): + # Test YOLOv6CSPBep with first stage frozen + frozen_stages = 1 + model = YOLOv6CSPBep(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.stem.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test YOLOv6CSPBep with norm_eval=True + model = YOLOv6CSPBep(norm_eval=True) + model.train() + + assert check_norm_state(model.modules(), False) + + # Test YOLOv6CSPBep forward with widen_factor=0.25 + model = YOLOv6CSPBep( + arch='P5', widen_factor=0.25, out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 16, 32, 32)) + assert feat[1].shape == torch.Size((1, 32, 16, 16)) + assert feat[2].shape == torch.Size((1, 64, 8, 8)) + assert feat[3].shape == torch.Size((1, 128, 4, 4)) + assert feat[4].shape == torch.Size((1, 256, 2, 2)) + + # Test YOLOv6CSPBep forward with dict(type='ReLU') + model = YOLOv6CSPBep( + widen_factor=0.125, + act_cfg=dict(type='ReLU'), + out_indices=range(0, 5)) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test YOLOv6CSPBep with BatchNorm forward + model = YOLOv6CSPBep(widen_factor=0.125, out_indices=range(0, 5)) + for m in 
model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 8, 32, 32)) + assert feat[1].shape == torch.Size((1, 16, 16, 16)) + assert feat[2].shape == torch.Size((1, 32, 8, 8)) + assert feat[3].shape == torch.Size((1, 64, 4, 4)) + assert feat[4].shape == torch.Size((1, 128, 2, 2)) + + # Test YOLOv6CSPBep with BatchNorm forward + model = YOLOv6CSPBep(plugins=[ + dict( + cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3), + stages=(False, False, True, True)), + ]) + + assert len(model.stage1) == 1 + assert len(model.stage2) == 1 + assert len(model.stage3) == 2 # +DropBlock + assert len(model.stage4) == 3 # +SPPF+DropBlock + model.train() + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 256, 32, 32)) + assert feat[1].shape == torch.Size((1, 512, 16, 16)) + assert feat[2].shape == torch.Size((1, 1024, 8, 8)) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_yolov7_backbone.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_yolov7_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..76b40aa44b99ea1509be6768a6c4287652961ad0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/test_yolov7_backbone.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmyolo.models.backbones import YOLOv7Backbone +from mmyolo.utils import register_all_modules +from .utils import check_norm_state + +register_all_modules() + + +class TestYOLOv7Backbone(TestCase): + + def test_init(self): + # out_indices in range(len(arch_setting) + 1) + with pytest.raises(AssertionError): + YOLOv7Backbone(out_indices=(6, )) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, len(arch_setting) + 1) + YOLOv7Backbone(frozen_stages=6) + + def test_forward(self): + # Test YOLOv7Backbone-L with first stage frozen + frozen_stages = 1 + model = YOLOv7Backbone(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.stem.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'stage{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test YOLOv7Backbone-L with norm_eval=True + model = YOLOv7Backbone(norm_eval=True) + model.train() + + assert check_norm_state(model.modules(), False) + + # Test YOLOv7Backbone-L forward with widen_factor=0.25 + model = YOLOv7Backbone( + widen_factor=0.25, out_indices=tuple(range(0, 5))) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size((1, 16, 32, 32)) + assert feat[1].shape == torch.Size((1, 64, 16, 16)) + assert feat[2].shape == torch.Size((1, 128, 8, 8)) + assert feat[3].shape == torch.Size((1, 256, 4, 4)) + assert feat[4].shape == torch.Size((1, 256, 2, 2)) + + # Test YOLOv7Backbone-L with plugins + model = YOLOv7Backbone( + widen_factor=0.25, + plugins=[ + dict( + cfg=dict( + type='mmdet.DropBlock', drop_prob=0.1, block_size=3), + stages=(False, False, True, True)), 
+ ]) + + assert len(model.stage1) == 2 + assert len(model.stage2) == 2 + assert len(model.stage3) == 3 # +DropBlock + assert len(model.stage4) == 3 # +DropBlock + model.train() + imgs = torch.randn(1, 3, 128, 128) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 128, 16, 16)) + assert feat[1].shape == torch.Size((1, 256, 8, 8)) + assert feat[2].shape == torch.Size((1, 256, 4, 4)) + + # Test YOLOv7Backbone-X forward with widen_factor=0.25 + model = YOLOv7Backbone(arch='X', widen_factor=0.25) + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 160, 8, 8)) + assert feat[1].shape == torch.Size((1, 320, 4, 4)) + assert feat[2].shape == torch.Size((1, 320, 2, 2)) + + # Test YOLOv7Backbone-tiny forward with widen_factor=0.25 + model = YOLOv7Backbone(arch='Tiny', widen_factor=0.25) + model.train() + + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 32, 8, 8)) + assert feat[1].shape == torch.Size((1, 64, 4, 4)) + assert feat[2].shape == torch.Size((1, 128, 2, 2)) + + # Test YOLOv7Backbone-w forward with widen_factor=0.25 + model = YOLOv7Backbone( + arch='W', widen_factor=0.25, out_indices=(2, 3, 4, 5)) + model.train() + + imgs = torch.randn(1, 3, 128, 128) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 64, 16, 16)) + assert feat[1].shape == torch.Size((1, 128, 8, 8)) + assert feat[2].shape == torch.Size((1, 192, 4, 4)) + assert feat[3].shape == torch.Size((1, 256, 2, 2)) + + # Test YOLOv7Backbone-w forward with widen_factor=0.25 + model = YOLOv7Backbone( + arch='D', widen_factor=0.25, out_indices=(2, 3, 4, 5)) + model.train() + + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 96, 16, 16)) + assert feat[1].shape == torch.Size((1, 192, 8, 8)) + assert feat[2].shape == torch.Size((1, 288, 4, 4)) + assert feat[3].shape == torch.Size((1, 384, 2, 2)) + + # Test YOLOv7Backbone-w forward with widen_factor=0.25 + model = YOLOv7Backbone( + arch='E', widen_factor=0.25, out_indices=(2, 3, 4, 5)) + model.train() + + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 80, 16, 16)) + assert feat[1].shape == torch.Size((1, 160, 8, 8)) + assert feat[2].shape == torch.Size((1, 240, 4, 4)) + assert feat[3].shape == torch.Size((1, 320, 2, 2)) + + # Test YOLOv7Backbone-w forward with widen_factor=0.25 + model = YOLOv7Backbone( + arch='E2E', widen_factor=0.25, out_indices=(2, 3, 4, 5)) + model.train() + + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 80, 16, 16)) + assert feat[1].shape == torch.Size((1, 160, 8, 8)) + assert feat[2].shape == torch.Size((1, 240, 4, 4)) + assert feat[3].shape == torch.Size((1, 320, 2, 2)) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/utils.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d65db568d6f1693eb457dc74b0d8c417cef1b9ea --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_backbone/utils.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdet.models.backbones.res2net import Bottle2neck +from mmdet.models.backbones.resnet import BasicBlock, Bottleneck +from mmdet.models.backbones.resnext import Bottleneck as BottleneckX +from mmdet.models.layers import SimplifiedBasicBlock +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck, + SimplifiedBasicBlock)): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/test_data_preprocessor.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/test_data_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..2c7e4415b627afe0046bc30b3b416af9deb302b6 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_data_preprocessor/test_data_preprocessor.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
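The backbone tests above rely on the small helpers defined in `test_backbone/utils.py`: `is_norm` identifies normalization layers and `check_norm_state` verifies that every `_BatchNorm` module is in the expected train/eval state, which is how the `norm_eval=True` cases are asserted. The following is a minimal, self-contained illustration of that check using plain `torch.nn`; the toy `model` here is hypothetical and not taken from the test suite.

```
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm


def check_norm_state(modules, train_state):
    """Return True if every batch-norm layer matches ``train_state``."""
    return all(m.training == train_state
               for m in modules if isinstance(m, _BatchNorm))


model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
model.train()
assert check_norm_state(model.modules(), True)

# A backbone built with norm_eval=True switches its BN layers back to eval
# mode inside train(); the backbone tests assert exactly this with
# check_norm_state(model.modules(), False).
model[1].eval()
assert check_norm_state(model.modules(), False)
```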
+from unittest import TestCase + +import torch +from mmdet.structures import DetDataSample +from mmengine import MessageHub + +from mmyolo.models import PPYOLOEBatchRandomResize, PPYOLOEDetDataPreprocessor +from mmyolo.models.data_preprocessors import (YOLOv5DetDataPreprocessor, + YOLOXBatchSyncRandomResize) +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv5DetDataPreprocessor(TestCase): + + def test_forward(self): + processor = YOLOv5DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1]) + + data = { + 'inputs': [torch.randint(0, 256, (3, 11, 10))], + 'data_samples': [DetDataSample()] + } + out_data = processor(data, training=False) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + + self.assertEqual(batch_inputs.shape, (1, 3, 11, 10)) + self.assertEqual(len(batch_data_samples), 1) + + # test channel_conversion + processor = YOLOv5DetDataPreprocessor( + mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True) + out_data = processor(data, training=False) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertEqual(batch_inputs.shape, (1, 3, 11, 10)) + self.assertEqual(len(batch_data_samples), 1) + + # test padding, training=False + data = { + 'inputs': [ + torch.randint(0, 256, (3, 10, 11)), + torch.randint(0, 256, (3, 9, 14)) + ] + } + processor = YOLOv5DetDataPreprocessor( + mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True) + out_data = processor(data, training=False) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertEqual(batch_inputs.shape, (2, 3, 10, 14)) + self.assertIsNone(batch_data_samples) + + # test training + data = { + 'inputs': torch.randint(0, 256, (2, 3, 10, 11)), + 'data_samples': { + 'bboxes_labels': torch.randint(0, 11, (18, 6)) + }, + } + out_data = processor(data, training=True) + batch_inputs, batch_data_samples = out_data['inputs'], out_data[ + 'data_samples'] + self.assertIn('img_metas', batch_data_samples) + self.assertIn('bboxes_labels', batch_data_samples) + self.assertEqual(batch_inputs.shape, (2, 3, 10, 11)) + self.assertIsInstance(batch_data_samples['bboxes_labels'], + torch.Tensor) + self.assertIsInstance(batch_data_samples['img_metas'], list) + + data = { + 'inputs': [torch.randint(0, 256, (3, 11, 10))], + 'data_samples': [DetDataSample()] + } + # data_samples must be dict + with self.assertRaises(AssertionError): + processor(data, training=True) + + +class TestPPYOLOEDetDataPreprocessor(TestCase): + + def test_batch_random_resize(self): + processor = PPYOLOEDetDataPreprocessor( + pad_size_divisor=32, + batch_augments=[ + dict( + type='PPYOLOEBatchRandomResize', + random_size_range=(320, 480), + interval=1, + size_divisor=32, + random_interp=True, + keep_ratio=False) + ], + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True) + self.assertTrue( + isinstance(processor.batch_augments[0], PPYOLOEBatchRandomResize)) + message_hub = MessageHub.get_instance('test_batch_random_resize') + message_hub.update_info('iter', 0) + + # test training + data = { + 'inputs': [ + torch.randint(0, 256, (3, 10, 11)), + torch.randint(0, 256, (3, 10, 11)) + ], + 'data_samples': { + 'bboxes_labels': torch.randint(0, 11, (18, 6)).float() + }, + } + out_data = processor(data, training=True) + batch_data_samples = out_data['data_samples'] + self.assertIn('img_metas', batch_data_samples) + self.assertIn('bboxes_labels', batch_data_samples) + self.assertIsInstance(batch_data_samples['bboxes_labels'], + 
torch.Tensor) + self.assertIsInstance(batch_data_samples['img_metas'], list) + + data = { + 'inputs': [torch.randint(0, 256, (3, 11, 10))], + 'data_samples': DetDataSample() + } + # data_samples must be list + with self.assertRaises(AssertionError): + processor(data, training=True) + + +class TestYOLOXDetDataPreprocessor(TestCase): + + def test_batch_sync_random_size(self): + processor = YOLOXBatchSyncRandomResize( + random_size_range=(480, 800), size_divisor=32, interval=1) + self.assertTrue(isinstance(processor, YOLOXBatchSyncRandomResize)) + message_hub = MessageHub.get_instance( + 'test_yolox_batch_sync_random_resize') + message_hub.update_info('iter', 0) + + # test training + inputs = torch.randint(0, 256, (4, 3, 10, 11)) + data_samples = {'bboxes_labels': torch.randint(0, 11, (18, 6)).float()} + + inputs, data_samples = processor(inputs, data_samples) + + self.assertIn('bboxes_labels', data_samples) + self.assertIsInstance(data_samples['bboxes_labels'], torch.Tensor) + self.assertIsInstance(inputs, torch.Tensor) + + inputs = torch.randint(0, 256, (4, 3, 10, 11)) + data_samples = DetDataSample() + + # data_samples must be dict + with self.assertRaises(AssertionError): + processor(inputs, data_samples) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_ppyoloe_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_ppyoloe_head.py new file mode 100644 index 0000000000000000000000000000000000000000..20e0c45761454f3575856babe39fa3fc95e6d5fa --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_ppyoloe_head.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from mmengine import ConfigDict, MessageHub +from mmengine.config import Config +from mmengine.model import bias_init_with_prob +from mmengine.testing import assert_allclose + +from mmyolo.models import PPYOLOEHead +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestPPYOLOEHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='PPYOLOEHeadModule', + num_classes=4, + in_channels=[32, 64, 128], + featmap_strides=(8, 16, 32)) + + def test_init_weights(self): + head = PPYOLOEHead(head_module=self.head_module) + head.head_module.init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_reg in zip(head.head_module.cls_preds, + head.head_module.reg_preds): + assert_allclose(conv_cls.weight.data, + torch.zeros_like(conv_cls.weight.data)) + assert_allclose(conv_reg.weight.data, + torch.zeros_like(conv_reg.weight.data)) + + assert_allclose(conv_cls.bias.data, + torch.ones_like(conv_cls.bias.data) * bias_init) + assert_allclose(conv_reg.bias.data, + torch.ones_like(conv_reg.bias.data)) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + nms_pre=1000, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.7), + max_per_img=300)) + + head = PPYOLOEHead(head_module=self.head_module, test_cfg=test_cfg) + head.eval() + feat = [ + torch.rand(1, in_channels, s // feat_size, s // feat_size) + for in_channels, feat_size in [[32, 8], [64, 16], [128, 32]] + ] + cls_scores, bbox_preds = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + None, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + None, + img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + message_hub = MessageHub.get_instance('test_ppyoloe_loss_by_feat') + message_hub.update_info('epoch', 1) + + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = PPYOLOEHead( + head_module=self.head_module, + train_cfg=ConfigDict( + initial_epoch=31, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=4, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=4, + topk=13, + alpha=1, + beta=6))) + head.train() + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = torch.empty((0, 6), dtype=torch.float32) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + bbox_dist_preds, gt_instances, + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
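+        # With an empty ground truth, the assigner marks every prior as
+        # background, so the classification branch is still penalised on its
+        # (random) scores, while the box and DFL terms are computed only over
+        # positive assignments and therefore reduce exactly to zero.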
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_dfl_loss = empty_gt_losses['loss_dfl'].sum() + self.assertGreater(empty_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertEqual( + empty_dfl_loss.item(), 0, + 'there should be df loss when there are no true boxes') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = PPYOLOEHead( + head_module=self.head_module, + train_cfg=ConfigDict( + initial_epoch=31, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=4, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=4, + topk=13, + alpha=1, + beta=6))) + head.train() + gt_instances = torch.Tensor( + [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]]) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + bbox_dist_preds, gt_instances, + img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_loss_dfl = one_gt_losses['loss_dfl'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_loss_dfl.item(), 0, + 'obj loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = PPYOLOEHead( + head_module=self.head_module, + train_cfg=ConfigDict( + initial_epoch=31, + initial_assigner=dict( + type='BatchATSSAssigner', + num_classes=1, + topk=9, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=1, + topk=13, + alpha=1, + beta=6))) + head.train() + gt_instances = torch.Tensor( + [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]]) + cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + bbox_dist_preds, gt_instances, + img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_loss_dfl = one_gt_losses['loss_dfl'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_loss_dfl.item(), 0, + 'obj loss should be non-zero') diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rotated_rtmdet_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rotated_rtmdet_head.py new file mode 100644 index 0000000000000000000000000000000000000000..21e1d4d139a2cbf2815f69ffac105100bcd62f34 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rotated_rtmdet_head.py @@ -0,0 +1,264 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import pytest +import torch +from mmengine.config import Config +from mmengine.structures import InstanceData + +from mmyolo.models.dense_heads import RTMDetRotatedHead +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestRTMDetRotatedHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='RTMDetRotatedSepBNHeadModule', + num_classes=4, + in_channels=1, + stacked_convs=1, + feat_channels=64, + featmap_strides=[4, 8, 16]) + + def test_init_weights(self): + head = RTMDetRotatedHead(head_module=self.head_module) + head.head_module.init_weights() + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = dict( + multi_label=True, + decode_with_angle=True, + nms_pre=2000, + score_thr=0.01, + nms=dict(type='nms_rotated', iou_threshold=0.1), + max_per_img=300) + test_cfg = Config(test_cfg) + + head = RTMDetRotatedHead( + head_module=self.head_module, test_cfg=test_cfg) + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, angle_preds = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + angle_preds, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + angle_preds, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + if not torch.cuda.is_available(): + pytest.skip('test requires GPU and torch+cuda') + + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + train_cfg = dict( + assigner=dict( + type='BatchDynamicSoftLabelAssigner', + num_classes=80, + topk=13, + iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'), + batch_iou=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + train_cfg = Config(train_cfg) + head = RTMDetRotatedHead( + head_module=self.head_module, train_cfg=train_cfg).cuda() + + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size).cuda() + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, angle_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 5)).cuda(), + labels=torch.LongTensor([]).cuda()) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + angle_preds, [gt_instances], + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + self.assertGreater(empty_cls_loss.item(), 0, + 'classification loss should be non-zero') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = RTMDetRotatedHead( + head_module=self.head_module, train_cfg=train_cfg).cuda() + gt_instances = InstanceData( + bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, + 0.2]]).cuda(), + labels=torch.LongTensor([1]).cuda()) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = RTMDetRotatedHead( + head_module=self.head_module, train_cfg=train_cfg).cuda() + gt_instances = InstanceData( + bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, + 0.2]]).cuda(), + labels=torch.LongTensor([0]).cuda()) + + cls_scores, bbox_preds, angle_preds = head.forward(feat) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + + def test_hbb_loss_by_feat(self): + + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + train_cfg = dict( + assigner=dict( + type='BatchDynamicSoftLabelAssigner', + num_classes=80, + topk=13, + iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'), + batch_iou=False), + allowed_border=-1, + pos_weight=-1, + debug=False) + train_cfg = Config(train_cfg) + hbb_cfg = dict( + bbox_coder=dict( + type='DistanceAnglePointCoder', angle_version='le90'), + loss_bbox=dict(type='mmdet.GIoULoss', loss_weight=2.0), + angle_coder=dict( + type='mmrotate.CSLCoder', + angle_version='le90', + omega=1, + window='gaussian', + radius=1), + loss_angle=dict( + type='mmrotate.SmoothFocalLoss', + gamma=2.0, + alpha=0.25, + loss_weight=0.2), + use_hbbox_loss=True, + ) + head = RTMDetRotatedHead( + head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg) + + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, angle_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 5)), labels=torch.LongTensor([])) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + angle_preds, [gt_instances], + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_angle_loss = empty_gt_losses['loss_angle'].sum() + self.assertGreater(empty_cls_loss.item(), 0, + 'classification loss should be non-zero') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertEqual( + empty_angle_loss.item(), 0, + 'there should be no angle loss when there are no true boxes') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = RTMDetRotatedHead( + head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg) + gt_instances = InstanceData( + bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]), + labels=torch.LongTensor([1])) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_angle_loss = one_gt_losses['loss_angle'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_angle_loss.item(), 0, + 'angle loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = RTMDetRotatedHead( + head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg) + gt_instances = InstanceData( + bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]), + labels=torch.LongTensor([0])) + + cls_scores, bbox_preds, angle_preds = head.forward(feat) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_angle_loss = one_gt_losses['loss_angle'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_angle_loss.item(), 0, + 'angle loss should be non-zero') diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rtmdet_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rtmdet_head.py new file mode 100644 index 0000000000000000000000000000000000000000..cce5ee6ffae5c697b32430b9b13cab16127450bb --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_rtmdet_head.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np +import torch +from mmengine.config import Config +from mmengine.structures import InstanceData + +from mmyolo.models import RTMDetInsSepBNHead +from mmyolo.models.dense_heads import RTMDetHead +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestRTMDetHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='RTMDetSepBNHeadModule', + num_classes=4, + in_channels=1, + stacked_convs=1, + feat_channels=64, + featmap_strides=[4, 8, 16]) + + def test_init_weights(self): + head = RTMDetHead(head_module=self.head_module) + head.head_module.init_weights() + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = dict( + multi_label=True, + nms_pre=30000, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.65), + max_per_img=300) + test_cfg = Config(test_cfg) + + head = RTMDetHead(head_module=self.head_module, test_cfg=test_cfg) + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + train_cfg = dict( + assigner=dict( + num_classes=80, + type='BatchDynamicSoftLabelAssigner', + topk=13, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')), + allowed_border=-1, + pos_weight=-1, + debug=False) + train_cfg = Config(train_cfg) + head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg) + + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 4)), labels=torch.LongTensor([])) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + [gt_instances], img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + self.assertGreater(empty_cls_loss.item(), 0, + 'classification loss should be non-zero') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([1])) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([0])) + + cls_scores, bbox_preds = head.forward(feat) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + + +class TestRTMDetInsHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='RTMDetInsSepBNHeadModule', + num_classes=4, + in_channels=1, + stacked_convs=1, + feat_channels=64, + featmap_strides=[4, 8, 16], + num_prototypes=8, + dyconv_channels=8, + num_dyconvs=3, + share_conv=True, + use_sigmoid_cls=True) + + def test_init_weights(self): + head = RTMDetInsSepBNHead(head_module=self.head_module) + head.head_module.init_weights() + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + 'pad_param': np.array([0., 0., 0., 0.]) + }] + test_cfg = dict( + multi_label=False, + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100, + mask_thr_binary=0.5) + test_cfg = Config(test_cfg) + + head = RTMDetInsSepBNHead( + head_module=self.head_module, test_cfg=test_cfg) + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, kernel_preds, mask_feat = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + kernel_preds, + mask_feat, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + + img_metas_without_pad_param = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0) + }] + head.predict_by_feat( + cls_scores, + bbox_preds, + kernel_preds, + mask_feat, + batch_img_metas=img_metas_without_pad_param, + cfg=test_cfg, + rescale=True, + with_nms=True) + + with self.assertRaises(AssertionError): + head.predict_by_feat( + cls_scores, + bbox_preds, + kernel_preds, + mask_feat, + batch_img_metas=img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov5_head.py 
b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov5_head.py new file mode 100644 index 0000000000000000000000000000000000000000..974b9a9869dbcf39e6928cadd7399b452ba93e1d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov5_head.py @@ -0,0 +1,411 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import numpy as np +import torch +from mmengine.config import Config +from mmengine.structures import InstanceData + +from mmyolo.models.dense_heads import YOLOv5Head, YOLOv5InsHead +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv5Head(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOv5HeadModule', + num_classes=2, + in_channels=[32, 64, 128], + featmap_strides=[8, 16, 32], + num_base_priors=3) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + max_per_img=300, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.65))) + + head = YOLOv5Head(head_module=self.head_module, test_cfg=test_cfg) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = YOLOv5Head(head_module=self.head_module) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 4)), labels=torch.LongTensor([])) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, [gt_instances], + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when there are no true boxes') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = YOLOv5Head(head_module=self.head_module) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([1])) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = YOLOv5Head(head_module=self.head_module) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([0])) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, + [gt_instances], img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + self.assertEqual(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + + def test_loss_by_feat_with_ignore(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 4)), labels=torch.LongTensor([])) + # ignore boxes + gt_instances_ignore = torch.tensor( + [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32) + + empty_gt_losses = head._loss_by_feat_with_ignore( + cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas, + gt_instances_ignore) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when there are no true boxes') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([1])) + + gt_instances_ignore = torch.tensor( + [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32) + + one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds, + objectnesses, + [gt_instances], + img_metas, + gt_instances_ignore) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), + labels=torch.LongTensor([0])) + + gt_instances_ignore = torch.tensor( + [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32) + + one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds, + objectnesses, + [gt_instances], + img_metas, + gt_instances_ignore) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + self.assertEqual(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + + +class TestYOLOv5InsHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOv5InsHeadModule', + num_classes=4, + in_channels=[32, 64, 128], + featmap_strides=[8, 16, 32], + mask_channels=32, + proto_channels=32, + widen_factor=1.0) + + def test_init_weights(self): + head = YOLOv5InsHead(head_module=self.head_module) + head.head_module.init_weights() + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + nms_pre=30000, + min_bbox_size=0, + score_thr=0.001, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=300, + mask_thr_binary=0.5)) + + head = YOLOv5InsHead(head_module=self.head_module, test_cfg=test_cfg) + head.eval() + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + with torch.no_grad(): + res = head.forward(feat) + cls_scores, bbox_preds, objectnesses,\ + coeff_preds, proto_preds = res + head.predict_by_feat( + 
cls_scores, + bbox_preds, + objectnesses, + coeff_preds, + proto_preds, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + + with self.assertRaises(AssertionError): + head.predict_by_feat( + cls_scores, + bbox_preds, + coeff_preds, + proto_preds, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = YOLOv5InsHead(head_module=self.head_module) + rng = np.random.RandomState(0) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses,\ + coeff_preds, proto_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_bboxes_labels = torch.empty((0, 6)) + gt_masks = rng.rand(0, s // 4, s // 4) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, coeff_preds, + proto_preds, gt_bboxes_labels, + gt_masks, img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. + empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + empty_mask_loss = empty_gt_losses['loss_mask'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when there are no true boxes') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + self.assertEqual( + empty_mask_loss.item(), 0, + 'there should be no mask loss when there are no true masks') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = YOLOv5InsHead(head_module=self.head_module) + + bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]) + labels = torch.Tensor([1.]) + batch_id = torch.LongTensor([0]) + gt_bboxes_labels = torch.cat([batch_id[None], labels[None], bboxes], + dim=1) + gt_masks = torch.from_numpy(rng.rand(1, s // 4, s // 4)).int() + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, + coeff_preds, proto_preds, + gt_bboxes_labels, gt_masks, + img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + onegt_mask_loss = one_gt_losses['loss_mask'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + self.assertGreater(onegt_mask_loss.item(), 0, + 'mask loss should be non-zero') + + # test num_class = 1 + self.head_module['num_classes'] = 1 + head = YOLOv5InsHead(head_module=self.head_module) + bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]) + labels = torch.Tensor([1.]) + batch_id = torch.LongTensor([0]) + gt_bboxes_labels = torch.cat([batch_id[None], labels[None], bboxes], + dim=1) + gt_masks = torch.from_numpy(rng.rand(1, s // 4, s // 4)).int() + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, + coeff_preds, proto_preds, + 
                                          gt_bboxes_labels, gt_masks,
+                                          img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
+        onegt_mask_loss = one_gt_losses['loss_mask'].sum()
+        self.assertEqual(onegt_cls_loss.item(), 0,
+                         'cls loss should be zero when num_classes == 1')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_obj_loss.item(), 0,
+                           'obj loss should be non-zero')
+        self.assertGreater(onegt_mask_loss.item(), 0,
+                           'mask loss should be non-zero')
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov6_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov6_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bb951d12360614b26b5d3ccf30d1c044ab0ccdc
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov6_head.py
@@ -0,0 +1,62 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase
+
+import torch
+from mmengine.config import Config
+
+from mmyolo.models.dense_heads import YOLOv6Head
+from mmyolo.utils import register_all_modules
+
+register_all_modules()
+
+
+class TestYOLOv6Head(TestCase):
+
+    def setUp(self):
+        self.head_module = dict(
+            type='YOLOv6HeadModule',
+            num_classes=2,
+            in_channels=[32, 64, 128],
+            featmap_strides=[8, 16, 32])
+
+    def test_predict_by_feat(self):
+        s = 256
+        img_metas = [{
+            'img_shape': (s, s, 3),
+            'ori_shape': (s, s, 3),
+            'scale_factor': (1.0, 1.0),
+        }]
+        test_cfg = Config(
+            dict(
+                multi_label=True,
+                max_per_img=300,
+                score_thr=0.01,
+                nms=dict(type='nms', iou_threshold=0.65)))
+
+        head = YOLOv6Head(head_module=self.head_module, test_cfg=test_cfg)
+        head.eval()
+
+        feat = []
+        for i in range(len(self.head_module['in_channels'])):
+            in_channel = self.head_module['in_channels'][i]
+            feat_size = self.head_module['featmap_strides'][i]
+            feat.append(
+                torch.rand(1, in_channel, s // feat_size, s // feat_size))
+
+        cls_scores, bbox_preds = head.forward(feat)
+        head.predict_by_feat(
+            cls_scores,
+            bbox_preds,
+            None,
+            img_metas,
+            cfg=test_cfg,
+            rescale=True,
+            with_nms=True)
+        head.predict_by_feat(
+            cls_scores,
+            bbox_preds,
+            None,
+            img_metas,
+            cfg=test_cfg,
+            rescale=False,
+            with_nms=False)
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov7_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov7_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5033f97e19673af79ab9a9c3ee2c618db3ea80e0
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov7_head.py
@@ -0,0 +1,145 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import torch +from mmengine.config import Config +from mmengine.structures import InstanceData + +from mmyolo.models.dense_heads import YOLOv7Head +from mmyolo.utils import register_all_modules + +register_all_modules() + + +# TODO: Test YOLOv7p6HeadModule +class TestYOLOv7Head(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOv7HeadModule', + num_classes=2, + in_channels=[32, 64, 128], + featmap_strides=[8, 16, 32], + num_base_priors=3) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + max_per_img=300, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.65))) + + head = YOLOv7Head(head_module=self.head_module, test_cfg=test_cfg) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = YOLOv7Head(head_module=self.head_module) + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, objectnesses = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = InstanceData( + bboxes=torch.empty((0, 4)), labels=torch.LongTensor([])) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, [gt_instances], + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
+        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
+        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
+        self.assertEqual(
+            empty_cls_loss.item(), 0,
+            'there should be no cls loss when there are no true boxes')
+        self.assertEqual(
+            empty_box_loss.item(), 0,
+            'there should be no box loss when there are no true boxes')
+        self.assertGreater(empty_obj_loss.item(), 0,
+                           'objectness loss should be non-zero')
+
+        # When truth is non-empty then both cls and box loss should be nonzero
+        # for random inputs
+        head = YOLOv7Head(head_module=self.head_module)
+        gt_instances = InstanceData(
+            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
+            labels=torch.LongTensor([1]))
+
+        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
+                                          [gt_instances], img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
+        self.assertGreater(onegt_cls_loss.item(), 0,
+                           'cls loss should be non-zero')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_obj_loss.item(), 0,
+                           'obj loss should be non-zero')
+
+        # test num_class = 1
+        self.head_module['num_classes'] = 1
+        head = YOLOv7Head(head_module=self.head_module)
+        gt_instances = InstanceData(
+            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
+            labels=torch.LongTensor([0]))
+
+        cls_scores, bbox_preds, objectnesses = head.forward(feat)
+
+        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
+                                          [gt_instances], img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
+        self.assertEqual(onegt_cls_loss.item(), 0,
+                         'cls loss should be zero when num_classes == 1')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_obj_loss.item(), 0,
+                           'obj loss should be non-zero')
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov8_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov8_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8980387a75bdd4ac1d3aebacf8a364e82259a01b
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolov8_head.py
@@ -0,0 +1,161 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import torch +from mmengine import ConfigDict +from mmengine.config import Config + +from mmyolo.models import YOLOv8Head +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv8Head(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOv8HeadModule', + num_classes=4, + in_channels=[32, 64, 128], + featmap_strides=[8, 16, 32]) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + max_per_img=300, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.65))) + + head = YOLOv8Head(head_module=self.head_module, test_cfg=test_cfg) + head.eval() + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + None, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + None, + img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'batch_input_shape': (s, s), + 'scale_factor': 1, + }] + + head = YOLOv8Head( + head_module=self.head_module, + train_cfg=ConfigDict( + assigner=dict( + type='BatchTaskAlignedAssigner', + num_classes=4, + topk=10, + alpha=0.5, + beta=6))) + head.train() + + feat = [] + for i in range(len(self.head_module['in_channels'])): + in_channel = self.head_module['in_channels'][i] + feat_size = self.head_module['featmap_strides'][i] + feat.append( + torch.rand(1, in_channel, s // feat_size, s // feat_size)) + + cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = torch.empty((0, 6), dtype=torch.float32) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + bbox_dist_preds, gt_instances, + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
+        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
+        empty_dfl_loss = empty_gt_losses['loss_dfl'].sum()
+        self.assertGreater(empty_cls_loss.item(), 0,
+                           'cls loss should be non-zero')
+        self.assertEqual(
+            empty_box_loss.item(), 0,
+            'there should be no box loss when there are no true boxes')
+        self.assertEqual(
+            empty_dfl_loss.item(), 0,
+            'there should be no dfl loss when there are no true boxes')
+
+        # When truth is non-empty then both cls and box loss should be nonzero
+        # for random inputs
+        gt_instances = torch.Tensor(
+            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
+
+        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
+                                          bbox_dist_preds, gt_instances,
+                                          img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
+        self.assertGreater(onegt_cls_loss.item(), 0,
+                           'cls loss should be non-zero')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_loss_dfl.item(), 0,
+                           'dfl loss should be non-zero')
+
+        # test num_class = 1
+        self.head_module['num_classes'] = 1
+        head = YOLOv8Head(
+            head_module=self.head_module,
+            train_cfg=ConfigDict(
+                assigner=dict(
+                    type='BatchTaskAlignedAssigner',
+                    num_classes=1,
+                    topk=10,
+                    alpha=0.5,
+                    beta=6)))
+        head.train()
+
+        gt_instances = torch.Tensor(
+            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874],
+             [1., 0., 24.6667, 27.8757, 28.6326, 51.8874]])
+        cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
+
+        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
+                                          bbox_dist_preds, gt_instances,
+                                          img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
+        self.assertGreater(onegt_cls_loss.item(), 0,
+                           'cls loss should be non-zero')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_loss_dfl.item(), 0,
+                           'dfl loss should be non-zero')
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolox_head.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..390994417c7fc9c0b2cb4470484ee3e28248a4a5
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_dense_heads/test_yolox_head.py
@@ -0,0 +1,379 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import torch +from mmengine.config import Config +from mmengine.model import bias_init_with_prob +from mmengine.testing import assert_allclose + +from mmyolo.models.dense_heads import YOLOXHead, YOLOXPoseHead +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOXHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOXHeadModule', + num_classes=4, + in_channels=1, + stacked_convs=1, + ) + + def test_init_weights(self): + head = YOLOXHead(head_module=self.head_module) + head.head_module.init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_obj in zip(head.head_module.multi_level_conv_cls, + head.head_module.multi_level_conv_obj): + assert_allclose(conv_cls.bias.data, + torch.ones_like(conv_cls.bias.data) * bias_init) + assert_allclose(conv_obj.bias.data, + torch.ones_like(conv_obj.bias.data) * bias_init) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + test_cfg = Config( + dict( + multi_label=True, + max_per_img=300, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.65))) + + head = YOLOXHead(head_module=self.head_module, test_cfg=test_cfg) + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, objectnesses = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + img_metas, + cfg=test_cfg, + rescale=False, + with_nms=False) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'scale_factor': 1, + }] + train_cfg = Config( + dict( + assigner=dict( + type='mmdet.SimOTAAssigner', + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + center_radius=2.5, + candidate_topk=10, + iou_weight=3.0, + cls_weight=1.0))) + + head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg) + assert not head.use_bbox_aux + + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, objectnesses = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = torch.empty((0, 6)) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, gt_instances, + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. 
+ empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when there are no true boxes') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg) + head.use_bbox_aux = True + gt_instances = torch.Tensor( + [[0, 2, 23.6667, 23.8757, 238.6326, 151.8874]]) + + one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, + gt_instances, img_metas) + onegt_cls_loss = one_gt_losses['loss_cls'].sum() + onegt_box_loss = one_gt_losses['loss_bbox'].sum() + onegt_obj_loss = one_gt_losses['loss_obj'].sum() + onegt_l1_loss = one_gt_losses['loss_bbox_aux'].sum() + self.assertGreater(onegt_cls_loss.item(), 0, + 'cls loss should be non-zero') + self.assertGreater(onegt_box_loss.item(), 0, + 'box loss should be non-zero') + self.assertGreater(onegt_obj_loss.item(), 0, + 'obj loss should be non-zero') + self.assertGreater(onegt_l1_loss.item(), 0, + 'l1 loss should be non-zero') + + # Test groud truth out of bound + gt_instances = torch.Tensor( + [[0, 2, s * 4, s * 4, s * 4 + 10, s * 4 + 10]]) + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, gt_instances, + img_metas) + # When gt_bboxes out of bound, the assign results should be empty, + # so the cls and bbox loss should be zero. + empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when gt_bboxes out of bound') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when gt_bboxes out of bound') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + + +class TestYOLOXPoseHead(TestCase): + + def setUp(self): + self.head_module = dict( + type='YOLOXPoseHeadModule', + num_classes=1, + num_keypoints=17, + in_channels=1, + stacked_convs=1, + ) + self.train_cfg = Config( + dict( + assigner=dict( + type='PoseSimOTAAssigner', + center_radius=2.5, + oks_weight=3.0, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + oks_calculator=dict( + type='OksLoss', + metainfo='configs/_base_/pose/coco.py')))) + self.loss_pose = Config( + dict( + type='OksLoss', + metainfo='configs/_base_/pose/coco.py', + loss_weight=30.0)) + + def test_init_weights(self): + head = YOLOXPoseHead( + head_module=self.head_module, + loss_pose=self.loss_pose, + train_cfg=self.train_cfg) + head.head_module.init_weights() + bias_init = bias_init_with_prob(0.01) + for conv_cls, conv_obj, conv_vis in zip( + head.head_module.multi_level_conv_cls, + head.head_module.multi_level_conv_obj, + head.head_module.multi_level_conv_vis): + assert_allclose(conv_cls.bias.data, + torch.ones_like(conv_cls.bias.data) * bias_init) + assert_allclose(conv_obj.bias.data, + torch.ones_like(conv_obj.bias.data) * bias_init) + assert_allclose(conv_vis.bias.data, + torch.ones_like(conv_vis.bias.data) * bias_init) + + def test_predict_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'ori_shape': (s, s, 3), + 'scale_factor': (1.0, 1.0), + }] + 
test_cfg = Config( + dict( + multi_label=True, + max_per_img=300, + score_thr=0.01, + nms=dict(type='nms', iou_threshold=0.65))) + + head = YOLOXPoseHead( + head_module=self.head_module, + loss_pose=self.loss_pose, + train_cfg=self.train_cfg, + test_cfg=test_cfg) + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, objectnesses, \ + offsets_preds, vis_preds = head.forward(feat) + head.predict_by_feat( + cls_scores, + bbox_preds, + objectnesses, + offsets_preds, + vis_preds, + img_metas, + cfg=test_cfg, + rescale=True, + with_nms=True) + + def test_loss_by_feat(self): + s = 256 + img_metas = [{ + 'img_shape': (s, s, 3), + 'scale_factor': 1, + }] + + head = YOLOXPoseHead( + head_module=self.head_module, + loss_pose=self.loss_pose, + train_cfg=self.train_cfg) + assert not head.use_bbox_aux + + feat = [ + torch.rand(1, 1, s // feat_size, s // feat_size) + for feat_size in [4, 8, 16] + ] + cls_scores, bbox_preds, objectnesses, \ + offsets_preds, vis_preds = head.forward(feat) + + # Test that empty ground truth encourages the network to predict + # background + gt_instances = torch.empty((0, 6)) + gt_keypoints = torch.empty((0, 17, 2)) + gt_keypoints_visible = torch.empty((0, 17)) + + empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, + objectnesses, offsets_preds, + vis_preds, gt_instances, + gt_keypoints, gt_keypoints_visible, + img_metas) + # When there is no truth, the cls loss should be nonzero but there + # should be no box loss. + empty_cls_loss = empty_gt_losses['loss_cls'].sum() + empty_box_loss = empty_gt_losses['loss_bbox'].sum() + empty_obj_loss = empty_gt_losses['loss_obj'].sum() + empty_loss_kpt = empty_gt_losses['loss_kpt'].sum() + empty_loss_vis = empty_gt_losses['loss_vis'].sum() + self.assertEqual( + empty_cls_loss.item(), 0, + 'there should be no cls loss when there are no true boxes') + self.assertEqual( + empty_box_loss.item(), 0, + 'there should be no box loss when there are no true boxes') + self.assertGreater(empty_obj_loss.item(), 0, + 'objectness loss should be non-zero') + self.assertEqual( + empty_loss_kpt.item(), 0, + 'there should be no kpt loss when there are no true keypoints') + self.assertEqual( + empty_loss_vis.item(), 0, + 'there should be no vis loss when there are no true keypoints') + # When truth is non-empty then both cls and box loss should be nonzero + # for random inputs + head = YOLOXPoseHead( + head_module=self.head_module, + loss_pose=self.loss_pose, + train_cfg=self.train_cfg) + gt_instances = torch.Tensor( + [[0, 0, 23.6667, 23.8757, 238.6326, 151.8874]]) + gt_keypoints = torch.Tensor([[[317.1519, + 429.8433], [338.3080, 416.9187], + [298.9951, + 403.8911], [102.7025, 273.1329], + [255.4321, + 404.8712], [400.0422, 554.4373], + [167.7857, + 516.7591], [397.4943, 737.4575], + [116.3247, + 674.5684], [102.7025, 273.1329], + [66.0319, + 808.6383], [102.7025, 273.1329], + [157.6150, + 819.1249], [102.7025, 273.1329], + [102.7025, + 273.1329], [102.7025, 273.1329], + [102.7025, 273.1329]]]) + gt_keypoints_visible = torch.Tensor([[ + 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
+        ]])
+
+        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
+                                          offsets_preds, vis_preds,
+                                          gt_instances, gt_keypoints,
+                                          gt_keypoints_visible, img_metas)
+        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
+        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
+        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
+        onegt_loss_kpt = one_gt_losses['loss_kpt'].sum()
+        onegt_loss_vis = one_gt_losses['loss_vis'].sum()
+
+        self.assertGreater(onegt_cls_loss.item(), 0,
+                           'cls loss should be non-zero')
+        self.assertGreater(onegt_box_loss.item(), 0,
+                           'box loss should be non-zero')
+        self.assertGreater(onegt_obj_loss.item(), 0,
+                           'obj loss should be non-zero')
+        self.assertGreater(onegt_loss_kpt.item(), 0,
+                           'kpt loss should be non-zero')
+        self.assertGreater(onegt_loss_vis.item(), 0,
+                           'vis loss should be non-zero')
+
+        # Test ground truth out of bound
+        gt_instances = torch.Tensor(
+            [[0, 2, s * 4, s * 4, s * 4 + 10, s * 4 + 10]])
+        gt_keypoints = torch.Tensor([[[s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10], [s * 4, s * 4 + 10],
+                                      [s * 4, s * 4 + 10]]])
+        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
+                                            objectnesses, offsets_preds,
+                                            vis_preds, gt_instances,
+                                            gt_keypoints, gt_keypoints_visible,
+                                            img_metas)
+        # When gt_bboxes out of bound, the assign results should be empty,
+        # so the cls and bbox loss should be zero.
+        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
+        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
+        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
+        empty_kpt_loss = empty_gt_losses['loss_kpt'].sum()
+        empty_vis_loss = empty_gt_losses['loss_vis'].sum()
+        self.assertEqual(
+            empty_cls_loss.item(), 0,
+            'there should be no cls loss when gt_bboxes out of bound')
+        self.assertEqual(
+            empty_box_loss.item(), 0,
+            'there should be no box loss when gt_bboxes out of bound')
+        self.assertGreater(empty_obj_loss.item(), 0,
+                           'objectness loss should be non-zero')
+        self.assertEqual(
+            empty_kpt_loss.item(), 0,
+            'there should be no kpt loss when gt_bboxes out of bound')
+        self.assertEqual(
+            empty_vis_loss.item(), 0,
+            'there should be no vis loss when gt_bboxes out of bound')
diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_detectors/test_yolo_detector.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_detectors/test_yolo_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b2952040d193781a6d042976c336485232e1a0a
--- /dev/null
+++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_detectors/test_yolo_detector.py
@@ -0,0 +1,137 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import time +import unittest +from unittest import TestCase + +import torch +from mmdet.structures import DetDataSample +from mmdet.testing import demo_mm_inputs +from mmengine.logging import MessageHub +from parameterized import parameterized + +from mmyolo.testing import get_detector_cfg +from mmyolo.utils import register_all_modules + + +class TestSingleStageDetector(TestCase): + + def setUp(self): + register_all_modules() + + @parameterized.expand([ + 'yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', + 'yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', + 'yolox/yolox_tiny_fast_8xb8-300e_coco.py', + 'rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', + 'yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', + 'yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py' + ]) + def test_init(self, cfg_file): + model = get_detector_cfg(cfg_file) + model.backbone.init_cfg = None + + from mmyolo.registry import MODELS + detector = MODELS.build(model) + self.assertTrue(detector.backbone) + self.assertTrue(detector.neck) + self.assertTrue(detector.bbox_head) + + @parameterized.expand([ + ('yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py', ('cuda', 'cpu')), + ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')), + ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')), + ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu')) + ]) + def test_forward_loss_mode(self, cfg_file, devices): + message_hub = MessageHub.get_instance( + f'test_single_stage_forward_loss_mode-{time.time()}') + message_hub.update_info('iter', 0) + message_hub.update_info('epoch', 0) + model = get_detector_cfg(cfg_file) + model.backbone.init_cfg = None + + if 'fast' in cfg_file: + model.data_preprocessor = dict( + type='mmdet.DetDataPreprocessor', + mean=[0., 0., 0.], + std=[255., 255., 255.], + bgr_to_rgb=True) + + from mmyolo.registry import MODELS + assert all([device in ['cpu', 'cuda'] for device in devices]) + + for device in devices: + detector = MODELS.build(model) + detector.init_weights() + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + detector = detector.cuda() + + packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]]) + data = detector.data_preprocessor(packed_inputs, True) + losses = detector.forward(**data, mode='loss') + self.assertIsInstance(losses, dict) + + @parameterized.expand([ + ('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda', + 'cpu')), + ('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')), + ('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')), + ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')), + ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')), + ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu')) + ]) + def test_forward_predict_mode(self, cfg_file, devices): + model = get_detector_cfg(cfg_file) + model.backbone.init_cfg = None + + from mmyolo.registry import MODELS + assert all([device in ['cpu', 'cuda'] for device in devices]) + + for device in devices: + detector = MODELS.build(model) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + detector = detector.cuda() + + packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]]) + data = detector.data_preprocessor(packed_inputs, False) + # Test forward test + detector.eval() + with torch.no_grad(): + batch_results = detector.forward(**data, mode='predict') + 
self.assertEqual(len(batch_results), 2) + self.assertIsInstance(batch_results[0], DetDataSample) + + @parameterized.expand([ + ('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda', + 'cpu')), + ('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')), + ('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')), + ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')), + ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')), + ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu')) + ]) + def test_forward_tensor_mode(self, cfg_file, devices): + model = get_detector_cfg(cfg_file) + model.backbone.init_cfg = None + + from mmyolo.registry import MODELS + assert all([device in ['cpu', 'cuda'] for device in devices]) + + for device in devices: + detector = MODELS.build(model) + + if device == 'cuda': + if not torch.cuda.is_available(): + return unittest.skip('test requires GPU and torch+cuda') + detector = detector.cuda() + + packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]]) + data = detector.data_preprocessor(packed_inputs, False) + batch_results = detector.forward(**data, mode='tensor') + self.assertIsInstance(batch_results, tuple) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_ema.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..b35838280ee5bc09d7c82b451f72468b53f5583f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_ema.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import itertools +import math +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.testing import assert_allclose + +from mmyolo.models.layers import ExpMomentumEMA + + +class TestEMA(TestCase): + + def test_exp_momentum_ema(self): + model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10)) + # Test invalid gamma + with self.assertRaisesRegex(AssertionError, + 'gamma must be greater than 0'): + ExpMomentumEMA(model, gamma=-1) + + # Test EMA + model = torch.nn.Sequential( + torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10)) + momentum = 0.1 + gamma = 4 + + ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma) + averaged_params = [ + torch.zeros_like(param) for param in model.parameters() + ] + n_updates = 10 + for i in range(n_updates): + updated_averaged_params = [] + for p, p_avg in zip(model.parameters(), averaged_params): + p.detach().add_(torch.randn_like(p)) + if i == 0: + updated_averaged_params.append(p.clone()) + else: + m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum + updated_averaged_params.append( + (p_avg * (1 - m) + p * m).clone()) + ema_model.update_parameters(model) + averaged_params = updated_averaged_params + + for p_target, p_ema in zip(averaged_params, ema_model.parameters()): + assert_allclose(p_target, p_ema) + + def test_exp_momentum_ema_update_buffer(self): + model = nn.Sequential( + nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3), + nn.Linear(5, 10)) + # Test invalid gamma + with self.assertRaisesRegex(AssertionError, + 'gamma must be greater than 0'): + ExpMomentumEMA(model, gamma=-1) + + # Test EMA with momentum annealing. + momentum = 0.1 + gamma = 4 + + ema_model = ExpMomentumEMA( + model, gamma=gamma, momentum=momentum, update_buffers=True) + averaged_params = [ + torch.zeros_like(param) + for param in itertools.chain(model.parameters(), model.buffers()) + if param.size() != torch.Size([]) + ] + n_updates = 10 + for i in range(n_updates): + updated_averaged_params = [] + params = [ + param for param in itertools.chain(model.parameters(), + model.buffers()) + if param.size() != torch.Size([]) + ] + for p, p_avg in zip(params, averaged_params): + p.detach().add_(torch.randn_like(p)) + if i == 0: + updated_averaged_params.append(p.clone()) + else: + m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum + updated_averaged_params.append( + (p_avg * (1 - m) + p * m).clone()) + ema_model.update_parameters(model) + averaged_params = updated_averaged_params + + ema_params = [ + param for param in itertools.chain(ema_model.module.parameters(), + ema_model.module.buffers()) + if param.size() != torch.Size([]) + ] + for p_target, p_ema in zip(averaged_params, ema_params): + assert_allclose(p_target, p_ema) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_yolo_bricks.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_yolo_bricks.py new file mode 100644 index 0000000000000000000000000000000000000000..5331a4e013c797052ed003b64b477d24ad10444c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_layers/test_yolo_bricks.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ +from unittest import TestCase + +import torch + +from mmyolo.models.layers import SPPFBottleneck +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestSPPFBottleneck(TestCase): + + def test_forward(self): + input_tensor = torch.randn((1, 3, 20, 20)) + bottleneck = SPPFBottleneck(3, 16) + out_tensor = bottleneck(input_tensor) + self.assertEqual(out_tensor.shape, (1, 16, 20, 20)) + + bottleneck = SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7]) + out_tensor = bottleneck(input_tensor) + self.assertEqual(out_tensor.shape, (1, 16, 20, 20)) + + # set len(kernel_sizes)=4 + bottleneck = SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7, 9]) + out_tensor = bottleneck(input_tensor) + self.assertEqual(out_tensor.shape, (1, 16, 20, 20)) + + # set use_conv_first=False + bottleneck = SPPFBottleneck( + 3, 16, use_conv_first=False, kernel_sizes=[3, 5, 7, 9]) + out_tensor = bottleneck(input_tensor) + self.assertEqual(out_tensor.shape, (1, 16, 20, 20)) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_cspnext_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_cspnext_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..b26c99aa3c90c9e53be6ef7f8f28c4996c49ca2f --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_cspnext_pafpn.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmyolo.models.necks import CSPNeXtPAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestCSPNeXtPAFPN(TestCase): + + def test_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = 24 + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = CSPNeXtPAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + # test depth-wise + neck = CSPNeXtPAFPN( + in_channels=in_channels, + out_channels=out_channels, + use_depthwise=True) + + from mmcv.cnn.bricks import DepthwiseSeparableConvModule + self.assertTrue(neck.conv, DepthwiseSeparableConvModule) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_ppyoloe_csppan.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_ppyoloe_csppan.py new file mode 100644 index 0000000000000000000000000000000000000000..b79c1ce5bee9f0761b6c3deedc2c8c250ad8aac7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_ppyoloe_csppan.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmyolo.models import PPYOLOECSPPAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestPPYOLOECSPPAFPN(TestCase): + + def test_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = PPYOLOECSPPAFPN( + in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + def test_drop_block(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = PPYOLOECSPPAFPN( + in_channels=in_channels, + out_channels=out_channels, + drop_block_cfg=dict( + type='mmdet.DropBlock', + drop_prob=0.1, + block_size=3, + warm_iters=0)) + neck.train() + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov5_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov5_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..339621ec4ba81de7c913b20dc1530289c3bd8c8c --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov5_pafpn.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmyolo.models.necks import YOLOv5PAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv5PAFPN(TestCase): + + def test_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv5PAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov6_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov6_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..e766aa8700e292d13d411b3eccc4542b8ef49725 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov6_pafpn.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmyolo.models.necks import (YOLOv6CSPRepBiPAFPN, YOLOv6CSPRepPAFPN, + YOLOv6RepBiPAFPN, YOLOv6RepPAFPN) +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv6PAFPN(TestCase): + + def test_YOLOv6RepPAFP_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv6RepPAFPN( + in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + def test_YOLOv6CSPRepPAFPN_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv6CSPRepPAFPN( + in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + def test_YOLOv6CSPRepBiPAFPN_forward(self): + s = 64 + in_channels = [4, 8, 16, 32] # includes an extra input for BiFusion + feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv6CSPRepBiPAFPN( + in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) - 1 + for i in range(len(feats) - 1): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == feat_sizes[i + 1] + + def test_YOLOv6RepBiPAFPN_forward(self): + s = 64 + in_channels = [4, 8, 16, 32] # includes an extra input for BiFusion + feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv6RepBiPAFPN( + in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) - 1 + for i in range(len(feats) - 1): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == feat_sizes[i + 1] diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov7_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov7_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..17bf455c12d6f75191813213d286ae9646ef2d14 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov7_pafpn.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch +from mmcv.cnn import ConvModule + +from mmyolo.models.necks import YOLOv7PAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv7PAFPN(TestCase): + + def test_forward(self): + # test P5 + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] * 2 + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + # test is_tiny_version + neck = YOLOv7PAFPN( + in_channels=in_channels, + out_channels=out_channels, + is_tiny_version=True) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] * 2 + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + # test use_in_channels_in_downsample + neck = YOLOv7PAFPN( + in_channels=in_channels, + out_channels=out_channels, + use_in_channels_in_downsample=True) + for f in feats: + print(f.shape) + outs = neck(feats) + for f in outs: + print(f.shape) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] * 2 + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) + + # test use_repconv_outs is False + neck = YOLOv7PAFPN( + in_channels=in_channels, + out_channels=out_channels, + use_repconv_outs=False) + self.assertIsInstance(neck.out_layers[0], ConvModule) + + # test P6 + s = 64 + in_channels = [8, 16, 32, 64] + feat_sizes = [s // 2**i for i in range(4)] + out_channels = [8, 16, 32, 64] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov8_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov8_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..66d136d0f26f68628b29c8a585bfaf4bea0b92fd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolov8_pafpn.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmyolo.models import YOLOv8PAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOv8PAFPN(TestCase): + + def test_YOLOv8PAFPN_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = [8, 16, 32] + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOv8PAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels[i] + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolox_pafpn.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolox_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..25fe67a12e969c28bfc09d66c265664c038feba5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_necks/test_yolox_pafpn.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmyolo.models.necks import YOLOXPAFPN +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestYOLOXPAFPN(TestCase): + + def test_forward(self): + s = 64 + in_channels = [8, 16, 32] + feat_sizes = [s // 2**i for i in range(4)] # [32, 16, 8] + out_channels = 24 + feats = [ + torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) + for i in range(len(in_channels)) + ] + neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels) + outs = neck(feats) + assert len(outs) == len(feats) + for i in range(len(feats)): + assert outs[i].shape[1] == out_channels + assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/test_cbam.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/test_cbam.py new file mode 100644 index 0000000000000000000000000000000000000000..4af547c05172a2e8de09a5d56c35fa0b383dcea0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_plugins/test_cbam.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+ +from unittest import TestCase + +import torch + +from mmyolo.models.plugins import CBAM +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestCBAM(TestCase): + + def test_forward(self): + tensor_shape = (2, 16, 20, 20) + + images = torch.randn(*tensor_shape) + cbam = CBAM(16) + out = cbam(images) + self.assertEqual(out.shape, tensor_shape) + + # test other ratio + cbam = CBAM(16, reduce_ratio=8) + out = cbam(images) + self.assertEqual(out.shape, tensor_shape) + + # test other act_cfg in ChannelAttention + cbam = CBAM(in_channels=16, act_cfg=dict(type='Sigmoid')) + out = cbam(images) + self.assertEqual(out.shape, tensor_shape) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_atss_assigner.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_atss_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..a01e4fce390965bb16a489237464c74851f09a25 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_atss_assigner.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import torch + +from mmyolo.models.task_modules.assigners import BatchATSSAssigner + + +class TestBatchATSSAssigner(TestCase): + + def test_batch_atss_assigner(self): + num_classes = 2 + batch_size = 2 + batch_atss_assigner = BatchATSSAssigner( + topk=3, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + num_classes=num_classes) + priors = torch.FloatTensor([ + [4., 4., 8., 8.], + [12., 4., 8., 8.], + [20., 4., 8., 8.], + [28., 4., 8., 8.], + ]).repeat(21, 1) + gt_bboxes = torch.FloatTensor([ + [0, 0, 60, 93], + [229, 0, 532, 157], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + gt_labels = torch.LongTensor([ + [0], + [11], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + num_level_bboxes = [64, 16, 4] + pad_bbox_flag = torch.FloatTensor([ + [1], + [0], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pred_bboxes = torch.FloatTensor([ + [-4., -4., 12., 12.], + [4., -4., 20., 12.], + [12., -4., 28., 12.], + [20., -4., 36., 12.], + ]).unsqueeze(0).repeat(batch_size, 21, 1) + batch_assign_result = batch_atss_assigner.forward( + pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes, + pad_bbox_flag) + + assigned_labels = batch_assign_result['assigned_labels'] + assigned_bboxes = batch_assign_result['assigned_bboxes'] + assigned_scores = batch_assign_result['assigned_scores'] + fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84, + 4])) + self.assertEqual(assigned_scores.shape, + torch.Size([batch_size, 84, num_classes])) + self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84])) + + def test_batch_atss_assigner_with_empty_gt(self): + """Test corner case where an image might have no true detections.""" + num_classes = 2 + batch_size = 2 + batch_atss_assigner = BatchATSSAssigner( + topk=3, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + num_classes=num_classes) + priors = torch.FloatTensor([ + [4., 4., 8., 8.], + [12., 4., 8., 8.], + [20., 4., 8., 8.], + [28., 4., 8., 8.], + ]).repeat(21, 1) + num_level_bboxes = [64, 16, 4] + pad_bbox_flag = torch.FloatTensor([ + [1], + [0], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pred_bboxes = torch.FloatTensor([ + [-4., -4., 12., 12.], + [4., -4., 20., 12.], + [12., -4., 28., 12.], + [20., -4., 36., 12.], + ]).unsqueeze(0).repeat(batch_size, 21, 1) + + gt_bboxes = torch.zeros(batch_size, 0, 4) + gt_labels = torch.zeros(batch_size, 0, 1) + + batch_assign_result = batch_atss_assigner.forward( + pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes, + pad_bbox_flag) + + assigned_labels = batch_assign_result['assigned_labels'] + assigned_bboxes = batch_assign_result['assigned_bboxes'] + assigned_scores = batch_assign_result['assigned_scores'] + fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84, + 4])) + self.assertEqual(assigned_scores.shape, + torch.Size([batch_size, 84, num_classes])) + self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84])) + + def test_batch_atss_assigner_with_empty_boxs(self): + """Test corner case where a network might predict no boxes.""" + num_classes = 2 + batch_size = 2 + batch_atss_assigner = BatchATSSAssigner( + topk=3, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + num_classes=num_classes) + priors = torch.zeros(84, 4) + gt_bboxes = 
torch.FloatTensor([ + [0, 0, 60, 93], + [229, 0, 532, 157], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + gt_labels = torch.LongTensor([ + [0], + [11], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + num_level_bboxes = [64, 16, 4] + pad_bbox_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat( + batch_size, 1, 1) + pred_bboxes = torch.FloatTensor([ + [-4., -4., 12., 12.], + [4., -4., 20., 12.], + [12., -4., 28., 12.], + [20., -4., 36., 12.], + ]).unsqueeze(0).repeat(batch_size, 21, 1) + + batch_assign_result = batch_atss_assigner.forward( + pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes, + pad_bbox_flag) + assigned_labels = batch_assign_result['assigned_labels'] + assigned_bboxes = batch_assign_result['assigned_bboxes'] + assigned_scores = batch_assign_result['assigned_scores'] + fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84, + 4])) + self.assertEqual(assigned_scores.shape, + torch.Size([batch_size, 84, num_classes])) + self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84])) + + def test_batch_atss_assigner_with_empty_boxes_and_gt(self): + """Test corner case where a network might predict no boxes and no + gt.""" + num_classes = 2 + batch_size = 2 + batch_atss_assigner = BatchATSSAssigner( + topk=3, + iou_calculator=dict(type='mmdet.BboxOverlaps2D'), + num_classes=num_classes) + priors = torch.zeros(84, 4) + gt_bboxes = torch.zeros(batch_size, 0, 4) + gt_labels = torch.zeros(batch_size, 0, 1) + num_level_bboxes = [64, 16, 4] + pad_bbox_flag = torch.zeros(batch_size, 0, 1) + pred_bboxes = torch.zeros(batch_size, 0, 4) + + batch_assign_result = batch_atss_assigner.forward( + pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes, + pad_bbox_flag) + assigned_labels = batch_assign_result['assigned_labels'] + assigned_bboxes = batch_assign_result['assigned_bboxes'] + assigned_scores = batch_assign_result['assigned_scores'] + fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84, + 4])) + self.assertEqual(assigned_scores.shape, + torch.Size([batch_size, 84, num_classes])) + self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84])) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_dsl_assigner.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_dsl_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..9644896ca2b609ae161de9eb74c2a520e13b76db --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_dsl_assigner.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import pytest +import torch + +from mmyolo.models.task_modules.assigners import BatchDynamicSoftLabelAssigner + + +class TestBatchDynamicSoftLabelAssigner(TestCase): + + def test_assign(self): + num_classes = 2 + batch_size = 2 + + assigner = BatchDynamicSoftLabelAssigner( + num_classes=num_classes, + soft_center_radius=3.0, + topk=1, + iou_weight=3.0) + + pred_bboxes = torch.FloatTensor([ + [23, 23, 43, 43], + [4, 5, 6, 7], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + pred_scores = torch.FloatTensor([ + [0.2], + [0.8], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6, + 7]]).repeat(10, 1) + + gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + gt_labels = torch.LongTensor([[0] + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + assign_result = assigner.forward(pred_bboxes, pred_scores, priors, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_labels = assign_result['assigned_labels'] + assigned_labels_weights = assign_result['assigned_labels_weights'] + assigned_bboxes = assign_result['assigned_bboxes'] + assign_metrics = assign_result['assign_metrics'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20, + 4])) + self.assertEqual(assigned_labels_weights.shape, + torch.Size([batch_size, 20])) + self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20])) + + def test_assign_with_empty_gt(self): + num_classes = 2 + batch_size = 2 + + assigner = BatchDynamicSoftLabelAssigner( + num_classes=num_classes, + soft_center_radius=3.0, + topk=1, + iou_weight=3.0) + + pred_bboxes = torch.FloatTensor([ + [23, 23, 43, 43], + [4, 5, 6, 7], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + pred_scores = torch.FloatTensor([ + [0.2], + [0.8], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6, + 7]]).repeat(10, 1) + + gt_bboxes = torch.zeros(batch_size, 0, 4) + gt_labels = torch.zeros(batch_size, 0, 1) + pad_bbox_flag = torch.zeros(batch_size, 0, 1) + + assign_result = assigner.forward(pred_bboxes, pred_scores, priors, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_labels = assign_result['assigned_labels'] + assigned_labels_weights = assign_result['assigned_labels_weights'] + assigned_bboxes = assign_result['assigned_bboxes'] + assign_metrics = assign_result['assign_metrics'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20, + 4])) + self.assertEqual(assigned_labels_weights.shape, + torch.Size([batch_size, 20])) + self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20])) + + def test_assign_with_empty_boxs(self): + num_classes = 2 + batch_size = 2 + + assigner = BatchDynamicSoftLabelAssigner( + num_classes=num_classes, + soft_center_radius=3.0, + topk=1, + iou_weight=3.0) + + pred_bboxes = torch.zeros(batch_size, 0, 4) + + pred_scores = torch.zeros(batch_size, 0, 4) + + priors = torch.zeros(0, 4) + gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + gt_labels = torch.LongTensor([[0] + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + assign_result = assigner.forward(pred_bboxes, pred_scores, priors, + gt_labels, gt_bboxes, 
pad_bbox_flag) + + assigned_labels = assign_result['assigned_labels'] + assigned_labels_weights = assign_result['assigned_labels_weights'] + assigned_bboxes = assign_result['assigned_bboxes'] + assign_metrics = assign_result['assign_metrics'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 0])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 0, 4])) + self.assertEqual(assigned_labels_weights.shape, + torch.Size([batch_size, 0])) + self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 0])) + + def test_assign_rotate_box(self): + try: + import importlib + importlib.import_module('mmrotate') + except ImportError: + pytest.skip('mmrotate is not installed.', allow_module_level=True) + + num_classes = 2 + batch_size = 2 + + assigner = BatchDynamicSoftLabelAssigner( + num_classes=num_classes, + soft_center_radius=3.0, + topk=1, + iou_weight=3.0, + iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'), + # RBboxOverlaps2D doesn't support batch input, use loop instead. + batch_iou=False, + ) + + pred_bboxes = torch.FloatTensor([ + [23, 23, 20, 20, 0.078], + [4, 5, 2, 2, 0.078], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + pred_scores = torch.FloatTensor([ + [0.2], + [0.8], + ]).unsqueeze(0).repeat(batch_size, 10, 1) + + priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6, + 7]]).repeat(10, 1) + + gt_bboxes = torch.FloatTensor([[23, 23, 20, 20, + 0.078]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + gt_labels = torch.LongTensor([[0] + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat( + batch_size, 1, 1) + + assign_result = assigner.forward(pred_bboxes, pred_scores, priors, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_labels = assign_result['assigned_labels'] + assigned_labels_weights = assign_result['assigned_labels_weights'] + assigned_bboxes = assign_result['assigned_bboxes'] + assign_metrics = assign_result['assign_metrics'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20, + 5])) + self.assertEqual(assigned_labels_weights.shape, + torch.Size([batch_size, 20])) + self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20])) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_task_aligned_assigner.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_task_aligned_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..fe474b53122703af556ff11a3ef42fa0a3ced736 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_batch_task_aligned_assigner.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
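The `pad_bbox_flag` tensor used throughout the assigner tests above marks which ground-truth rows are real and which are zero-padding once a batch is padded to a common number of boxes. A small illustrative sketch of how such a flag can be built (variable names here are illustrative, not taken from mmyolo):

```python
import torch

# Two images: the first has 2 ground-truth boxes, the second has 1.
gt_counts = [2, 1]
max_num_gt = max(gt_counts)

pad_bbox_flag = torch.zeros(len(gt_counts), max_num_gt, 1)
for img_idx, num_gt in enumerate(gt_counts):
    pad_bbox_flag[img_idx, :num_gt, 0] = 1.0  # 1 -> real box, 0 -> padding

print(pad_bbox_flag.squeeze(-1))
# tensor([[1., 1.],
#         [1., 0.]])
```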
+from unittest import TestCase + +import torch + +from mmyolo.models.task_modules.assigners import BatchTaskAlignedAssigner + + +class TestBatchTaskAlignedAssigner(TestCase): + + def test_batch_task_aligned_assigner(self): + batch_size = 2 + num_classes = 4 + assigner = BatchTaskAlignedAssigner( + num_classes=num_classes, alpha=1, beta=6, topk=13, eps=1e-9) + pred_scores = torch.FloatTensor([ + [0.1, 0.2], + [0.2, 0.3], + [0.3, 0.4], + [0.4, 0.5], + ]).unsqueeze(0).repeat(batch_size, 21, 1) + priors = torch.FloatTensor([ + [0, 0, 4., 4.], + [0, 0, 12., 4.], + [0, 0, 20., 4.], + [0, 0, 28., 4.], + ]).repeat(21, 1) + gt_bboxes = torch.FloatTensor([ + [0, 0, 60, 93], + [229, 0, 532, 157], + ]).unsqueeze(0).repeat(batch_size, 1, 1) + gt_labels = torch.LongTensor([[0], [1] + ]).unsqueeze(0).repeat(batch_size, 1, 1) + pad_bbox_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat( + batch_size, 1, 1) + pred_bboxes = torch.FloatTensor([ + [-4., -4., 12., 12.], + [4., -4., 20., 12.], + [12., -4., 28., 12.], + [20., -4., 36., 12.], + ]).unsqueeze(0).repeat(batch_size, 21, 1) + + assign_result = assigner.forward(pred_bboxes, pred_scores, priors, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_labels = assign_result['assigned_labels'] + assigned_bboxes = assign_result['assigned_bboxes'] + assigned_scores = assign_result['assigned_scores'] + fg_mask_pre_prior = assign_result['fg_mask_pre_prior'] + + self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84])) + self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84, + 4])) + self.assertEqual(assigned_scores.shape, + torch.Size([batch_size, 84, num_classes])) + self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84])) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_pose_sim_ota_assigner.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_pose_sim_ota_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4793f7e4ed0066545e821352f0a5e263d3b9fd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_assigners/test_pose_sim_ota_assigner.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from mmengine.structures import InstanceData +from mmengine.testing import assert_allclose + +from mmyolo.models.task_modules.assigners import PoseSimOTAAssigner + + +class TestPoseSimOTAAssigner(TestCase): + + def test_assign(self): + assigner = PoseSimOTAAssigner( + center_radius=2.5, + candidate_topk=1, + iou_weight=3.0, + cls_weight=1.0, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')) + pred_instances = InstanceData( + bboxes=torch.Tensor([[23, 23, 43, 43] + [1] * 51, + [4, 5, 6, 7] + [1] * 51]), + scores=torch.FloatTensor([[0.2], [0.8]]), + priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]])) + gt_instances = InstanceData( + bboxes=torch.Tensor([[23, 23, 43, 43]]), + labels=torch.LongTensor([0]), + keypoints_visible=torch.Tensor([[ + 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 0., 0., 0., 0., + 0. 
+ ]]), + keypoints=torch.Tensor([[[30, 30], [30, 30], [30, 30], [30, 30], + [30, 30], [30, 30], [30, 30], [30, 30], + [30, 30], [30, 30], [30, 30], [30, 30], + [30, 30], [30, 30], [30, 30], [30, 30], + [30, 30]]])) + assign_result = assigner.assign( + pred_instances=pred_instances, gt_instances=gt_instances) + + expected_gt_inds = torch.LongTensor([1, 0]) + assert_allclose(assign_result.gt_inds, expected_gt_inds) + + def test_assign_with_no_valid_bboxes(self): + assigner = PoseSimOTAAssigner( + center_radius=2.5, + candidate_topk=1, + iou_weight=3.0, + cls_weight=1.0, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')) + pred_instances = InstanceData( + bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]), + scores=torch.FloatTensor([[0.2], [0.8]]), + priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]])) + gt_instances = InstanceData( + bboxes=torch.Tensor([[0, 0, 1, 1]]), + labels=torch.LongTensor([0]), + keypoints_visible=torch.zeros((1, 17)), + keypoints=torch.zeros((1, 17, 2))) + assign_result = assigner.assign( + pred_instances=pred_instances, gt_instances=gt_instances) + + expected_gt_inds = torch.LongTensor([0, 0]) + assert_allclose(assign_result.gt_inds, expected_gt_inds) + + def test_assign_with_empty_gt(self): + assigner = PoseSimOTAAssigner( + center_radius=2.5, + candidate_topk=1, + iou_weight=3.0, + cls_weight=1.0, + iou_calculator=dict(type='mmdet.BboxOverlaps2D')) + pred_instances = InstanceData( + bboxes=torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]]), + scores=torch.FloatTensor([[0.2], [0.8]]), + priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]])) + gt_instances = InstanceData( + bboxes=torch.empty(0, 4), + labels=torch.empty(0), + keypoints_visible=torch.empty(0, 17), + keypoints=torch.empty(0, 17, 2)) + + assign_result = assigner.assign( + pred_instances=pred_instances, gt_instances=gt_instances) + expected_gt_inds = torch.LongTensor([0, 0]) + assert_allclose(assign_result.gt_inds, expected_gt_inds) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_distance_point_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_distance_point_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..10b0215c27d7a1f88f894f459cf641555833da9e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_distance_point_bbox_coder.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
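In the `PoseSimOTAAssigner` assertions above, `gt_inds` follows the usual MMDetection convention: `0` means the prediction is left unassigned, and a positive value `k` means it is matched to the k-th ground truth (1-based). A small hypothetical helper, not part of the test file, that turns this encoding into a foreground mask plus 0-based ground-truth indices:

```python
import torch


def split_assignment(gt_inds: torch.Tensor):
    """Convert 1-based assignment indices into (fg_mask, matched_gt_idx)."""
    fg_mask = gt_inds > 0            # predictions that received a ground truth
    matched = gt_inds[fg_mask] - 1   # back to 0-based ground-truth indices
    return fg_mask, matched


gt_inds = torch.LongTensor([1, 0])   # as asserted in the first test above
fg_mask, matched = split_assignment(gt_inds)
print(fg_mask.tolist(), matched.tolist())  # [True, False] [0]
```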
+from unittest import TestCase + +import torch + +from mmyolo.models.task_modules.coders import DistancePointBBoxCoder + + +class TestDistancePointBBoxCoder(TestCase): + + def test_decoder(self): + coder = DistancePointBBoxCoder() + + points = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], + [29., 170.]]) + pred_bboxes = torch.Tensor([[0, -1, 3, 3], [-1, -7, -4.8, 9], + [-23, -1, 12, 1], [14.5, -13, 10, 18.3]]) + expected_distance = torch.Tensor([[74, 63, 80, 67], + [-25, 134, -48.2, 142], + [276, 67, 210, 67], + [-58, 248, 89, 279.8]]) + strides = torch.Tensor([2, 4, 6, 6]) + out_distance = coder.decode(points, pred_bboxes, strides) + assert expected_distance.allclose(out_distance) + + batch_priors = points.unsqueeze(0).repeat(2, 1, 1) + batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1) + batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0] + assert out_distance.allclose(batch_out) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolov5_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolov5_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..e1d4ebe1fd9dc5263b09e8d07a456a41e61bbc3b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolov5_bbox_coder.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmyolo.models.task_modules.coders import YOLOv5BBoxCoder + + +class TestYOLOv5Coder(TestCase): + + def test_decoder(self): + coder = YOLOv5BBoxCoder() + + priors = torch.Tensor([[10., 10., 20., 20.], [10., 8., 10., 10.], + [15., 8., 20., 3.], [2., 5., 5., 8.]]) + pred_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000], + [0.1409, 0.1409, 2.8591, 2.8591], + [0.0000, 0.3161, 4.1945, 0.6839], + [1.0000, 5.0000, 9.0000, 5.0000]]) + strides = torch.Tensor([2, 4, 8, 8]) + expected_decode_bboxes = torch.Tensor( + [[4.3111, 4.3111, 25.6889, 25.6889], + [10.2813, 5.7033, 10.2813, 12.8594], + [7.7949, 11.1710, 27.2051, 2.3369], + [1.1984, 8.4730, 13.1955, 20.3129]]) + out = coder.decode(priors, pred_bboxes, strides) + assert expected_decode_bboxes.allclose(out, atol=1e-04) + + batch_priors = priors.unsqueeze(0).repeat(2, 1, 1) + batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1) + batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0] + assert out.allclose(batch_out) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolox_bbox_coder.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolox_bbox_coder.py new file mode 100644 index 0000000000000000000000000000000000000000..00d6c3164b840ad05fe112ff629ad74faffb2418 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_task_modules/test_coders/test_yolox_bbox_coder.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
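The expected values in the `DistancePointBBoxCoder` test can be reproduced by hand: the four regression channels are treated as (left, top, right, bottom) distances from the prior point, scaled by the stride. A standalone re-derivation of the first test row in plain PyTorch, independent of the coder class:

```python
import torch

point = torch.tensor([74., 61.])        # prior point (x, y)
ltrb = torch.tensor([0., -1., 3., 3.])  # predicted (left, top, right, bottom)
stride = 2.

x1y1 = point - ltrb[:2] * stride        # top-left corner
x2y2 = point + ltrb[2:] * stride        # bottom-right corner
print(torch.cat([x1y1, x2y2]))          # tensor([74., 63., 80., 67.])
```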
+from unittest import TestCase + +import torch + +from mmyolo.models.task_modules.coders import YOLOXBBoxCoder + + +class TestYOLOv5Coder(TestCase): + + def test_decoder(self): + coder = YOLOXBBoxCoder() + + priors = torch.Tensor([[10., 10.], [8., 8.], [15., 8.], [2., 5.]]) + pred_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000], + [0.0409, 0.1409, 0.8591, 0.8591], + [0.0000, 0.3161, 0.1945, 0.6839], + [1.0000, 5.0000, 0.2000, 0.6000]]) + strides = torch.Tensor([2, 4, 6, 6]) + expected_decode_bboxes = torch.Tensor( + [[7.2817, 7.2817, 12.7183, 12.7183], + [3.4415, 3.8415, 12.8857, 13.2857], + [11.3559, 3.9518, 18.6441, 15.8414], + [4.3358, 29.5336, 11.6642, 40.4664]]) + out = coder.decode(priors, pred_bboxes, strides) + assert expected_decode_bboxes.allclose(out, atol=1e-04) + + batch_priors = priors.unsqueeze(0).repeat(2, 1, 1) + batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1) + batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0] + assert out.allclose(batch_out) diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/__init__.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef101fec61e72abc0eb90266d453b5b22331378d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/test_misc.py b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..dce9502571e4294757ac6f2b9bb524e35c372c29 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_models/test_utils/test_misc.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest +import torch +from mmengine.structures import InstanceData +from torch import Tensor + +from mmyolo.models.utils import gt_instances_preprocess +from mmyolo.utils import register_all_modules + +register_all_modules() + + +class TestGtInstancesPreprocess: + + @pytest.mark.parametrize('box_dim', [4, 5]) + def test(self, box_dim): + gt_instances = InstanceData( + bboxes=torch.empty((0, box_dim)), labels=torch.LongTensor([])) + batch_size = 1 + batch_instance = gt_instances_preprocess([gt_instances], batch_size) + assert isinstance(batch_instance, Tensor) + assert len(batch_instance.shape) == 3, 'the len of result must be 3.' + assert batch_instance.size(-1) == box_dim + 1 + + @pytest.mark.parametrize('box_dim', [4, 5]) + def test_fast_version(self, box_dim: int): + gt_instances = torch.from_numpy( + np.array([[0., 1., *(0., ) * box_dim]], dtype=np.float32)) + batch_size = 1 + batch_instance = gt_instances_preprocess(gt_instances, batch_size) + assert isinstance(batch_instance, Tensor) + assert len(batch_instance.shape) == 3, 'the len of result must be 3.' + assert batch_instance.shape[1] == 1 + assert batch_instance.shape[2] == box_dim + 1 diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_collect_env.py b/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..913f46fa3c9286e9c3cbd656ad5e93def143aea0 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_collect_env.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
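Likewise, the `YOLOXBBoxCoder` expectations above follow the standard YOLOX decoding rule, which the test numbers confirm: the first two channels are center offsets in stride units and the last two are log-scale widths and heights. Re-deriving the first test row outside the coder class:

```python
import torch

prior = torch.tensor([10., 10.])       # grid point (x, y)
pred = torch.tensor([0., 0., 1., 1.])  # (dx, dy, log w, log h)
stride = 2.

center = prior + pred[:2] * stride     # (10., 10.)
wh = pred[2:].exp() * stride           # (~5.4366, ~5.4366)
box = torch.cat([center - wh / 2, center + wh / 2])
print(box)  # tensor([ 7.2817,  7.2817, 12.7183, 12.7183])
```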
+import sys +from unittest import TestCase + +import mmcv +import mmdet +import mmengine + +from mmyolo.utils import collect_env + + +class TestCollectEnv(TestCase): + + def test_collect_env(self): + env_info = collect_env() + print(env_info) + expected_keys = [ + 'sys.platform', 'Python', 'CUDA available', 'PyTorch', + 'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC' + ] + for key in expected_keys: + assert key in env_info + + if env_info['CUDA available']: + for key in ['CUDA_HOME', 'NVCC']: + assert key in env_info + + assert env_info['sys.platform'] == sys.platform + assert env_info['Python'] == sys.version.replace('\n', '') + + assert env_info['MMEngine'] == mmengine.__version__ + assert env_info['MMCV'] == mmcv.__version__ + assert env_info['MMDetection'] == mmdet.__version__ diff --git a/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_setup_env.py b/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_setup_env.py new file mode 100644 index 0000000000000000000000000000000000000000..e6bd6890b31bbe9179553bd440cc0e8bc44329c2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tests/test_utils/test_setup_env.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import sys +from unittest import TestCase + +from mmengine import DefaultScope + +from mmyolo.utils import register_all_modules + + +class TestSetupEnv(TestCase): + + def test_register_all_modules(self): + from mmyolo.registry import DATASETS + + # not init default scope + sys.modules.pop('mmyolo.datasets', None) + sys.modules.pop('mmyolo.datasets.yolov5_coco', None) + DATASETS._module_dict.pop('YOLOv5CocoDataset', None) + self.assertFalse('YOLOv5CocoDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=False) + self.assertTrue('YOLOv5CocoDataset' in DATASETS.module_dict) + + # init default scope + sys.modules.pop('mmyolo.datasets', None) + sys.modules.pop('mmyolo.datasets.yolov5_coco', None) + DATASETS._module_dict.pop('YOLOv5CocoDataset', None) + self.assertFalse('YOLOv5CocoDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=True) + self.assertTrue('YOLOv5CocoDataset' in DATASETS.module_dict) + self.assertEqual(DefaultScope.get_current_instance().scope_name, + 'mmyolo') + + # init default scope when another scope is init + name = f'test-{datetime.datetime.now()}' + DefaultScope.get_instance(name, scope_name='test') + with self.assertWarnsRegex( + Warning, 'The current default scope "test" is not "mmyolo"'): + register_all_modules(init_default_scope=True) diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/benchmark.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..29f53a4768c3339d656d4bb71dae3396e5501265 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/benchmark.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
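`register_all_modules`, exercised by the setup-env test above, imports the mmyolo datasets, models and transforms so their registry entries exist, and can additionally make `mmyolo` the default scope. A typical entry-point usage, assuming mmyolo and its dependencies are installed:

```python
from mmengine import DefaultScope

from mmyolo.utils import register_all_modules

# Populate the registries and set 'mmyolo' as the default scope, so that
# configs without an explicit scope resolve to mmyolo modules.
register_all_modules(init_default_scope=True)
assert DefaultScope.get_current_instance().scope_name == 'mmyolo'
```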
+import argparse +import copy +import os +import time + +import torch +from mmengine import Config, DictAction +from mmengine.dist import get_world_size, init_dist +from mmengine.logging import MMLogger, print_log +from mmengine.registry import init_default_scope +from mmengine.runner import Runner, load_checkpoint +from mmengine.utils import mkdir_or_exist +from mmengine.utils.dl_utils import set_multi_processing + +from mmyolo.registry import MODELS + + +# TODO: Refactoring and improving +def parse_args(): + parser = argparse.ArgumentParser(description='MMYOLO benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--repeat-num', + type=int, + default=1, + help='number of repeat times of measurement for averaging the results') + parser.add_argument( + '--max-iter', type=int, default=2000, help='num of max iter') + parser.add_argument( + '--log-interval', type=int, default=50, help='interval of logging') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing ' + 'benchmark metrics') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def measure_inference_speed(cfg, checkpoint, max_iter, log_interval, + is_fuse_conv_bn): + env_cfg = cfg.get('env_cfg') + if env_cfg.get('cudnn_benchmark'): + torch.backends.cudnn.benchmark = True + + mp_cfg: dict = env_cfg.get('mp_cfg', {}) + set_multi_processing(**mp_cfg, distributed=cfg.distributed) + + # Because multiple processes will occupy additional CPU resources, + # FPS statistics will be more unstable when num_workers is not 0. + # It is reasonable to set num_workers to 0. 
+ dataloader_cfg = cfg.test_dataloader + dataloader_cfg['num_workers'] = 0 + dataloader_cfg['batch_size'] = 1 + dataloader_cfg['persistent_workers'] = False + data_loader = Runner.build_dataloader(dataloader_cfg) + + # build the model and load checkpoint + model = MODELS.build(cfg.model) + load_checkpoint(model, checkpoint, map_location='cpu') + model = model.cuda() + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + fps = 0 + + # benchmark with 2000 image and take the average + for i, data in enumerate(data_loader): + + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model.test_step(data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print_log( + f'Done image [{i + 1:<3}/ {max_iter}], ' + f'fps: {fps:.1f} img / s, ' + f'times per image: {1000 / fps:.1f} ms / img', 'current') + + if (i + 1) == max_iter: + fps = (i + 1 - num_warmup) / pure_inf_time + print_log( + f'Overall fps: {fps:.1f} img / s, ' + f'times per image: {1000 / fps:.1f} ms / img', 'current') + break + return fps + + +def repeat_measure_inference_speed(cfg, + checkpoint, + max_iter, + log_interval, + is_fuse_conv_bn, + repeat_num=1): + assert repeat_num >= 1 + + fps_list = [] + + for _ in range(repeat_num): + cp_cfg = copy.deepcopy(cfg) + + fps_list.append( + measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval, + is_fuse_conv_bn)) + + if repeat_num > 1: + fps_list_ = [round(fps, 1) for fps in fps_list] + times_pre_image_list_ = [round(1000 / fps, 1) for fps in fps_list] + mean_fps_ = sum(fps_list_) / len(fps_list_) + mean_times_pre_image_ = sum(times_pre_image_list_) / len( + times_pre_image_list_) + print_log( + f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, ' + f'times per image: ' + f'{times_pre_image_list_}[{mean_times_pre_image_:.1f}] ms / img', + 'current') + return fps_list + + return fps_list[0] + + +# TODO: refactoring +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + distributed = False + if args.launcher != 'none': + init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {})) + distributed = True + assert get_world_size( + ) == 1, 'Inference benchmark does not allow distributed multi-GPU' + + cfg.distributed = distributed + + log_file = None + if args.work_dir: + log_file = os.path.join(args.work_dir, 'benchmark.log') + mkdir_or_exist(args.work_dir) + + MMLogger.get_instance('mmyolo', log_file=log_file, log_level='INFO') + + repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter, + args.log_interval, args.fuse_conv_bn, + args.repeat_num) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_coco_json.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_coco_json.py new file mode 100644 index 0000000000000000000000000000000000000000..71a2fc2a942d234e1ce2e3e93901a66bacb123df --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_coco_json.py @@ -0,0 +1,147 @@ +import argparse +import os.path as osp + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.collections import PatchCollection +from matplotlib.patches 
import Polygon +from pycocotools.coco import COCO + + +def show_coco_json(args): + if args.data_root is not None: + coco = COCO(osp.join(args.data_root, args.ann_file)) + else: + coco = COCO(args.ann_file) + print(f'Total number of images:{len(coco.getImgIds())}') + categories = coco.loadCats(coco.getCatIds()) + category_names = [category['name'] for category in categories] + print(f'Total number of Categories : {len(category_names)}') + print('Categories: \n{}\n'.format(' '.join(category_names))) + + if args.category_names is None: + category_ids = [] + else: + assert set(category_names) > set(args.category_names) + category_ids = coco.getCatIds(args.category_names) + + image_ids = coco.getImgIds(catIds=category_ids) + + if args.shuffle: + np.random.shuffle(image_ids) + + for i in range(len(image_ids)): + image_data = coco.loadImgs(image_ids[i])[0] + if args.data_root is not None: + image_path = osp.join(args.data_root, args.img_dir, + image_data['file_name']) + else: + image_path = osp.join(args.img_dir, image_data['file_name']) + + annotation_ids = coco.getAnnIds( + imgIds=image_data['id'], catIds=category_ids, iscrowd=0) + annotations = coco.loadAnns(annotation_ids) + + image = cv2.imread(image_path) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + plt.figure() + plt.imshow(image) + + if args.disp_all: + coco.showAnns(annotations) + else: + show_bbox_only(coco, annotations) + + if args.wait_time == 0: + plt.show() + else: + plt.show(block=False) + plt.pause(args.wait_time) + + plt.close() + + +def show_bbox_only(coco, anns, show_label_bbox=True, is_filling=True): + """Show bounding box of annotations Only.""" + if len(anns) == 0: + return + + ax = plt.gca() + ax.set_autoscale_on(False) + + image2color = dict() + for cat in coco.getCatIds(): + image2color[cat] = (np.random.random((1, 3)) * 0.7 + 0.3).tolist()[0] + + polygons = [] + colors = [] + + for ann in anns: + color = image2color[ann['category_id']] + bbox_x, bbox_y, bbox_w, bbox_h = ann['bbox'] + poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h], + [bbox_x + bbox_w, bbox_y + bbox_h], [bbox_x + bbox_w, bbox_y]] + polygons.append(Polygon(np.array(poly).reshape((4, 2)))) + colors.append(color) + + if show_label_bbox: + label_bbox = dict(facecolor=color) + else: + label_bbox = None + + ax.text( + bbox_x, + bbox_y, + '%s' % (coco.loadCats(ann['category_id'])[0]['name']), + color='white', + bbox=label_bbox) + + if is_filling: + p = PatchCollection( + polygons, facecolor=colors, linewidths=0, alpha=0.4) + ax.add_collection(p) + p = PatchCollection( + polygons, facecolor='none', edgecolors=colors, linewidths=2) + ax.add_collection(p) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Show coco json file') + parser.add_argument('--data-root', default=None, help='dataset root') + parser.add_argument( + '--img-dir', default='data/coco/train2017', help='image folder path') + parser.add_argument( + '--ann-file', + default='data/coco/annotations/instances_train2017.json', + help='ann file path') + parser.add_argument( + '--wait-time', type=float, default=2, help='the interval of show (s)') + parser.add_argument( + '--disp-all', + action='store_true', + help='Whether to display all types of data, ' + 'such as bbox and mask.' 
+ ' Default is to display only bbox') + parser.add_argument( + '--category-names', + type=str, + default=None, + nargs='+', + help='Display category-specific data, e.g., "bicycle", "person"') + parser.add_argument( + '--shuffle', + action='store_true', + help='Whether to display in disorder') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + show_coco_json(args) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..21a1d709d3ced0e5f865748afa0a1e258a8751f9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset.py @@ -0,0 +1,276 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import sys +from typing import Tuple + +import cv2 +import mmcv +import numpy as np +from mmdet.models.utils import mask2ndarray +from mmdet.structures.bbox import BaseBoxes +from mmengine.config import Config, DictAction +from mmengine.dataset import Compose +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar +from mmengine.visualization import Visualizer + +from mmyolo.registry import DATASETS, VISUALIZERS + + +# TODO: Support for printing the change in key of results +# TODO: Some bug. If you meet some bug, please use the original +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--phase', + '-p', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".' + ' Defaults to "train".') + parser.add_argument( + '--mode', + '-m', + default='transformed', + type=str, + choices=['original', 'transformed', 'pipeline'], + help='display mode; display original pictures or ' + 'transformed pictures or comparison pictures. "original" ' + 'means show images load from disk; "transformed" means ' + 'to show images after transformed; "pipeline" means show all ' + 'the intermediate images. Defaults to "transformed".') + parser.add_argument( + '--out-dir', + default='output', + type=str, + help='If there is no display interface, you can save it.') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--show-number', + '-n', + type=int, + default=sys.maxsize, + help='number of images selected to visualize, ' + 'must bigger than 0. if the number is bigger than length ' + 'of dataset, show all the images in dataset; ' + 'default "sys.maxsize", show all images in dataset') + parser.add_argument( + '--show-interval', + '-i', + type=float, + default=3, + help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def _get_adaptive_scale(img_shape: Tuple[int, int], + min_scale: float = 0.3, + max_scale: float = 3.0) -> float: + """Get adaptive scale according to image shape. + + The target scale depends on the the short edge length of the image. If the + short edge length equals 224, the output is 1.0. And output linear + scales according the short edge length. You can also specify the minimum + scale and the maximum scale to limit the linear scale. + + Args: + img_shape (Tuple[int, int]): The shape of the canvas image. + min_scale (int): The minimum scale. Defaults to 0.3. + max_scale (int): The maximum scale. Defaults to 3.0. + Returns: + int: The adaptive scale. + """ + short_edge_length = min(img_shape) + scale = short_edge_length / 224. + return min(max(scale, min_scale), max_scale) + + +def make_grid(imgs, names): + """Concat list of pictures into a single big picture, align height here.""" + visualizer = Visualizer.get_current_instance() + ori_shapes = [img.shape[:2] for img in imgs] + max_height = int(max(img.shape[0] for img in imgs) * 1.1) + min_width = min(img.shape[1] for img in imgs) + horizontal_gap = min_width // 10 + img_scale = _get_adaptive_scale((max_height, min_width)) + + texts = [] + text_positions = [] + start_x = 0 + for i, img in enumerate(imgs): + pad_height = (max_height - img.shape[0]) // 2 + pad_width = horizontal_gap // 2 + # make border + imgs[i] = cv2.copyMakeBorder( + img, + pad_height, + max_height - img.shape[0] - pad_height + int(img_scale * 30 * 2), + pad_width, + pad_width, + cv2.BORDER_CONSTANT, + value=(255, 255, 255)) + texts.append(f'{"execution: "}{i}\n{names[i]}\n{ori_shapes[i]}') + text_positions.append( + [start_x + img.shape[1] // 2 + pad_width, max_height]) + start_x += img.shape[1] + horizontal_gap + + display_img = np.concatenate(imgs, axis=1) + visualizer.set_image(display_img) + img_scale = _get_adaptive_scale(display_img.shape[:2]) + visualizer.draw_texts( + texts, + positions=np.array(text_positions), + font_sizes=img_scale * 7, + colors='black', + horizontal_alignments='center', + font_families='monospace') + return visualizer.get_image() + + +def swap_pipeline_position(dataset_cfg): + load_ann_tfm_name = 'LoadAnnotations' + pipeline = dataset_cfg.get('pipeline') + if (pipeline is None): + return dataset_cfg + all_transform_types = [tfm['type'] for tfm in pipeline] + if load_ann_tfm_name in all_transform_types: + load_ann_tfm_index = all_transform_types.index(load_ann_tfm_name) + load_ann_tfm = pipeline.pop(load_ann_tfm_index) + pipeline.insert(1, load_ann_tfm) + + +class InspectCompose(Compose): + """Compose multiple transforms sequentially. + + And record "img" field of all results in one list. 
+ """ + + def __init__(self, transforms, intermediate_imgs): + super().__init__(transforms=transforms) + self.intermediate_imgs = intermediate_imgs + + def __call__(self, data): + if 'img' in data: + self.intermediate_imgs.append({ + 'name': 'original', + 'img': data['img'].copy() + }) + self.ptransforms = [ + self.transforms[i] for i in range(len(self.transforms) - 1) + ] + for t in self.ptransforms: + data = t(data) + # Keep the same meta_keys in the PackDetInputs + self.transforms[-1].meta_keys = [key for key in data] + data_sample = self.transforms[-1](data) + if data is None: + return None + if 'img' in data: + self.intermediate_imgs.append({ + 'name': + t.__class__.__name__, + 'dataset_sample': + data_sample['data_samples'] + }) + return data + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset') + if (args.phase in ['test', 'val']): + swap_pipeline_position(dataset_cfg) + dataset = DATASETS.build(dataset_cfg) + visualizer = VISUALIZERS.build(cfg.visualizer) + visualizer.dataset_meta = dataset.metainfo + + intermediate_imgs = [] + + if not hasattr(dataset, 'pipeline'): + # for dataset_wrapper + dataset = dataset.dataset + + # TODO: The dataset wrapper occasion is not considered here + dataset.pipeline = InspectCompose(dataset.pipeline.transforms, + intermediate_imgs) + + # init visualization image number + assert args.show_number > 0 + display_number = min(args.show_number, len(dataset)) + + progress_bar = ProgressBar(display_number) + for i, item in zip(range(display_number), dataset): + image_i = [] + result_i = [result['dataset_sample'] for result in intermediate_imgs] + for k, datasample in enumerate(result_i): + image = datasample.img + gt_instances = datasample.gt_instances + image = image[..., [2, 1, 0]] # bgr to rgb + gt_bboxes = gt_instances.get('bboxes', None) + if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes): + gt_instances.bboxes = gt_bboxes.tensor + gt_masks = gt_instances.get('masks', None) + if gt_masks is not None: + masks = mask2ndarray(gt_masks) + gt_instances.masks = masks.astype(bool) + datasample.gt_instances = gt_instances + # get filename from dataset or just use index as filename + visualizer.add_datasample( + 'result', + image, + datasample, + draw_pred=False, + draw_gt=True, + show=False) + image_show = visualizer.get_image() + image_i.append(image_show) + + if args.mode == 'original': + image = image_i[0] + elif args.mode == 'transformed': + image = image_i[-1] + else: + image = make_grid([result for result in image_i], + [result['name'] for result in intermediate_imgs]) + + if hasattr(datasample, 'img_path'): + filename = osp.basename(datasample.img_path) + else: + # some dataset have not image path + filename = f'{i}.jpg' + out_file = osp.join(args.out_dir, + filename) if args.out_dir is not None else None + + if out_file is not None: + mmcv.imwrite(image[..., ::-1], out_file) + + if not args.not_show: + visualizer.show( + image, win_name=filename, wait_time=args.show_interval) + + intermediate_imgs.clear() + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset_simple.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset_simple.py new file mode 100644 index 
0000000000000000000000000000000000000000..ebacbde3a5a2e1212089e4d4038fa286d462071b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/browse_dataset_simple.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp + +from mmdet.models.utils import mask2ndarray +from mmdet.structures.bbox import BaseBoxes +from mmengine.config import Config, DictAction +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar + +from mmyolo.registry import DATASETS, VISUALIZERS + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--show-interval', + type=float, + default=0, + help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # register all modules in mmdet into the registries + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + dataset = DATASETS.build(cfg.train_dataloader.dataset) + visualizer = VISUALIZERS.build(cfg.visualizer) + visualizer.dataset_meta = dataset.metainfo + + progress_bar = ProgressBar(len(dataset)) + for item in dataset: + img = item['inputs'].permute(1, 2, 0).numpy() + data_sample = item['data_samples'].numpy() + gt_instances = data_sample.gt_instances + img_path = osp.basename(item['data_samples'].img_path) + + out_file = osp.join( + args.output_dir, + osp.basename(img_path)) if args.output_dir is not None else None + + img = img[..., [2, 1, 0]] # bgr to rgb + gt_bboxes = gt_instances.get('bboxes', None) + if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes): + gt_instances.bboxes = gt_bboxes.tensor + gt_masks = gt_instances.get('masks', None) + if gt_masks is not None: + masks = mask2ndarray(gt_masks) + gt_instances.masks = masks.astype(bool) + data_sample.gt_instances = gt_instances + + visualizer.add_datasample( + osp.basename(img_path), + img, + data_sample, + draw_pred=False, + show=not args.not_show, + wait_time=args.show_interval, + out_file=out_file) + + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/confusion_matrix.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/confusion_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..f48abdb90eadba3d50bec106c2ad0ea7709e897d --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/confusion_matrix.py @@ -0,0 +1,273 @@ +import argparse +import os + +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.ticker import MultipleLocator +from mmcv.ops import nms +from mmdet.evaluation import bbox_overlaps +from 
mmdet.utils import replace_cfg_vals, update_data_root +from mmengine import Config, DictAction +from mmengine.fileio import load +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar + +from mmyolo.registry import DATASETS + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate confusion matrix from detection results') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'prediction_path', help='prediction path where test .pkl result') + parser.add_argument( + 'save_dir', help='directory where confusion matrix will be saved') + parser.add_argument( + '--show', action='store_true', help='show confusion matrix') + parser.add_argument( + '--color-theme', + default='plasma', + help='theme of the matrix color map') + parser.add_argument( + '--score-thr', + type=float, + default=0.3, + help='score threshold to filter detection bboxes') + parser.add_argument( + '--tp-iou-thr', + type=float, + default=0.5, + help='IoU threshold to be considered as matched') + parser.add_argument( + '--nms-iou-thr', + type=float, + default=None, + help='nms IoU threshold, only applied when users want to change the' + 'nms IoU threshold.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def calculate_confusion_matrix(dataset, + results, + score_thr=0, + nms_iou_thr=None, + tp_iou_thr=0.5): + """Calculate the confusion matrix. + + Args: + dataset (Dataset): Test or val dataset. + results (list[ndarray]): A list of detection results in each image. + score_thr (float|optional): Score threshold to filter bboxes. + Default: 0. + nms_iou_thr (float|optional): nms IoU threshold, the detection results + have done nms in the detector, only applied when users want to + change the nms IoU threshold. Default: None. + tp_iou_thr (float|optional): IoU threshold to be considered as matched. + Default: 0.5. + """ + num_classes = len(dataset.metainfo['classes']) + confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1]) + assert len(dataset) == len(results) + prog_bar = ProgressBar(len(results)) + for idx, per_img_res in enumerate(results): + res_bboxes = per_img_res['pred_instances'] + gts = dataset.get_data_info(idx)['instances'] + analyze_per_img_dets(confusion_matrix, gts, res_bboxes, score_thr, + tp_iou_thr, nms_iou_thr) + prog_bar.update() + return confusion_matrix + + +def analyze_per_img_dets(confusion_matrix, + gts, + result, + score_thr=0, + tp_iou_thr=0.5, + nms_iou_thr=None): + """Analyze detection results on each image. + + Args: + confusion_matrix (ndarray): The confusion matrix, + has shape (num_classes + 1, num_classes + 1). + gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4). + gt_labels (ndarray): Ground truth labels, has shape (num_gt). + result (ndarray): Detection results, has shape + (num_classes, num_bboxes, 5). + score_thr (float): Score threshold to filter bboxes. + Default: 0. + tp_iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. 
+ nms_iou_thr (float|optional): nms IoU threshold, the detection results + have done nms in the detector, only applied when users want to + change the nms IoU threshold. Default: None. + """ + true_positives = np.zeros(len(gts)) + gt_bboxes = [] + gt_labels = [] + for gt in gts: + gt_bboxes.append(gt['bbox']) + gt_labels.append(gt['bbox_label']) + + gt_bboxes = np.array(gt_bboxes) + gt_labels = np.array(gt_labels) + + unique_label = np.unique(result['labels'].numpy()) + + for det_label in unique_label: + mask = (result['labels'] == det_label) + det_bboxes = result['bboxes'][mask].numpy() + det_scores = result['scores'][mask].numpy() + + if nms_iou_thr: + det_bboxes, _ = nms( + det_bboxes, det_scores, nms_iou_thr, score_threshold=score_thr) + ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes) + for i, score in enumerate(det_scores): + det_match = 0 + if score >= score_thr: + for j, gt_label in enumerate(gt_labels): + if ious[i, j] >= tp_iou_thr: + det_match += 1 + if gt_label == det_label: + true_positives[j] += 1 # TP + confusion_matrix[gt_label, det_label] += 1 + if det_match == 0: # BG FP + confusion_matrix[-1, det_label] += 1 + for num_tp, gt_label in zip(true_positives, gt_labels): + if num_tp == 0: # FN + confusion_matrix[gt_label, -1] += 1 + + +def plot_confusion_matrix(confusion_matrix, + labels, + save_dir=None, + show=True, + title='Normalized Confusion Matrix', + color_theme='plasma'): + """Draw confusion matrix with matplotlib. + + Args: + confusion_matrix (ndarray): The confusion matrix. + labels (list[str]): List of class names. + save_dir (str|optional): If set, save the confusion matrix plot to the + given path. Default: None. + show (bool): Whether to show the plot. Default: True. + title (str): Title of the plot. Default: `Normalized Confusion Matrix`. + color_theme (str): Theme of the matrix color map. Default: `plasma`. 
+ """ + # normalize the confusion matrix + per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] + confusion_matrix = \ + confusion_matrix.astype(np.float32) / per_label_sums * 100 + + num_classes = len(labels) + fig, ax = plt.subplots( + figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180) + cmap = plt.get_cmap(color_theme) + im = ax.imshow(confusion_matrix, cmap=cmap) + plt.colorbar(mappable=im, ax=ax) + + title_font = {'weight': 'bold', 'size': 12} + ax.set_title(title, fontdict=title_font) + label_font = {'size': 10} + plt.ylabel('Ground Truth Label', fontdict=label_font) + plt.xlabel('Prediction Label', fontdict=label_font) + + # draw locator + xmajor_locator = MultipleLocator(1) + xminor_locator = MultipleLocator(0.5) + ax.xaxis.set_major_locator(xmajor_locator) + ax.xaxis.set_minor_locator(xminor_locator) + ymajor_locator = MultipleLocator(1) + yminor_locator = MultipleLocator(0.5) + ax.yaxis.set_major_locator(ymajor_locator) + ax.yaxis.set_minor_locator(yminor_locator) + + # draw grid + ax.grid(True, which='minor', linestyle='-') + + # draw label + ax.set_xticks(np.arange(num_classes)) + ax.set_yticks(np.arange(num_classes)) + ax.set_xticklabels(labels) + ax.set_yticklabels(labels) + + ax.tick_params( + axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) + plt.setp( + ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') + + # draw confution matrix value + for i in range(num_classes): + for j in range(num_classes): + ax.text( + j, + i, + '{}%'.format( + int(confusion_matrix[ + i, + j]) if not np.isnan(confusion_matrix[i, j]) else -1), + ha='center', + va='center', + color='w', + size=7) + + ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 + + fig.tight_layout() + if save_dir is not None: + plt.savefig( + os.path.join(save_dir, 'confusion_matrix.png'), format='png') + if show: + plt.show() + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMYOLO_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + results = load(args.prediction_path) + + if not os.path.exists(args.save_dir): + os.makedirs(args.save_dir) + + dataset = DATASETS.build(cfg.test_dataloader.dataset) + + confusion_matrix = calculate_confusion_matrix(dataset, results, + args.score_thr, + args.nms_iou_thr, + args.tp_iou_thr) + plot_confusion_matrix( + confusion_matrix, + dataset.metainfo['classes'] + ('background', ), + save_dir=args.save_dir, + show=args.show, + color_theme=args.color_theme) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/dataset_analysis.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/dataset_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..b2164e16b9809957b317b3c9406918292300707a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/dataset_analysis.py @@ -0,0 +1,498 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
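As a reading aid for `calculate_confusion_matrix` and `analyze_per_img_dets` above: the matrix carries one extra row and column for "background", so unmatched detections (false positives) land in the last row, unmatched ground truths (false negatives) land in the last column, and `plot_confusion_matrix` then normalises each row to percentages. A tiny self-contained illustration of that bookkeeping with toy numbers (not produced by the script):

```python
import numpy as np

num_classes = 2
cm = np.zeros((num_classes + 1, num_classes + 1))

cm[0, 0] += 1    # class-0 detection matched a class-0 ground truth (TP)
cm[1, -1] += 1   # class-1 ground truth that no detection matched (FN -> last column)
cm[-1, 1] += 1   # class-1 detection that matched nothing (FP -> last row)

# Row-wise normalisation to percentages, as done before plotting.
normalized = cm / cm.sum(axis=1, keepdims=True) * 100
print(normalized)
```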
+import argparse +import os.path +from statistics import median + +import matplotlib.patches as mpatches +import matplotlib.pyplot as plt +import numpy as np +from mmengine.config import Config +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar +from prettytable import PrettyTable + +from mmyolo.registry import DATASETS +from mmyolo.utils.misc import show_data_classes + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Distribution of categories and bbox instances') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--val-dataset', + default=False, + action='store_true', + help='The default train_dataset.' + 'To change it to val_dataset, enter "--val-dataset"') + parser.add_argument( + '--class-name', + default=None, + type=str, + help='Display specific class, e.g., "bicycle"') + parser.add_argument( + '--area-rule', + default=None, + type=int, + nargs='+', + help='Redefine area rules,but no more than three numbers.' + ' e.g., 30 70 125') + parser.add_argument( + '--func', + default=None, + type=str, + choices=[ + 'show_bbox_num', 'show_bbox_wh', 'show_bbox_wh_ratio', + 'show_bbox_area' + ], + help='Dataset analysis function selection.') + parser.add_argument( + '--out-dir', + default='./dataset_analysis', + type=str, + help='Output directory of dataset analysis visualization results,' + ' Save in "./dataset_analysis/" by default') + args = parser.parse_args() + return args + + +def show_bbox_num(cfg, out_dir, fig_set, class_name, class_num): + """Display the distribution map of categories and number of bbox + instances.""" + print('\n\nDrawing bbox_num figure:') + # Draw designs + fig = plt.figure( + figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300) + plt.bar(class_name, class_num, align='center') + + # Draw titles, labels and so on + for x, y in enumerate(class_num): + plt.text(x, y, '%s' % y, ha='center', fontsize=fig_set['fontsize'] + 3) + plt.xticks(rotation=fig_set['xticks_angle']) + plt.xlabel('Category Name') + plt.ylabel('Num of instances') + plt.title(cfg.dataset_type) + + # Save figure + if not os.path.exists(out_dir): + os.makedirs(out_dir) + out_name = fig_set['out_name'] + fig.savefig( + f'{out_dir}/{out_name}_bbox_num.jpg', + bbox_inches='tight', + pad_inches=0.1) # Save Image + plt.close() + print(f'End and save in {out_dir}/{out_name}_bbox_num.jpg') + + +def show_bbox_wh(out_dir, fig_set, class_bbox_w, class_bbox_h, class_name): + """Display the width and height distribution of categories and bbox + instances.""" + print('\n\nDrawing bbox_wh figure:') + # Draw designs + fig, ax = plt.subplots( + figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300) + + # Set the position of the map and label on the x-axis + positions_w = list(range(0, 12 * len(class_name), 12)) + positions_h = list(range(6, 12 * len(class_name), 12)) + positions_x_label = list(range(3, 12 * len(class_name) + 1, 12)) + ax.violinplot( + class_bbox_w, positions_w, showmeans=True, showmedians=True, widths=4) + ax.violinplot( + class_bbox_h, positions_h, showmeans=True, showmedians=True, widths=4) + + # Draw titles, labels and so on + plt.xticks(rotation=fig_set['xticks_angle']) + plt.ylabel('The width or height of bbox') + plt.xlabel('Class name') + plt.title('Width or height distribution of classes and bbox instances') + + # Draw the max, min and median of wide data in violin chart + for i in range(len(class_bbox_w)): + plt.text( + positions_w[i], + median(class_bbox_w[i]), + f'{"%.2f" % 
median(class_bbox_w[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions_w[i], + max(class_bbox_w[i]), + f'{"%.2f" % max(class_bbox_w[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions_w[i], + min(class_bbox_w[i]), + f'{"%.2f" % min(class_bbox_w[i])}', + ha='center', + fontsize=fig_set['fontsize']) + + # Draw the max, min and median of height data in violin chart + for i in range(len(positions_h)): + plt.text( + positions_h[i], + median(class_bbox_h[i]), + f'{"%.2f" % median(class_bbox_h[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions_h[i], + max(class_bbox_h[i]), + f'{"%.2f" % max(class_bbox_h[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions_h[i], + min(class_bbox_h[i]), + f'{"%.2f" % min(class_bbox_h[i])}', + ha='center', + fontsize=fig_set['fontsize']) + + # Draw Legend + plt.setp(ax, xticks=positions_x_label, xticklabels=class_name) + labels = ['bbox_w', 'bbox_h'] + colors = ['steelblue', 'darkorange'] + patches = [ + mpatches.Patch(color=colors[i], label=f'{labels[i]:s}') + for i in range(len(colors)) + ] + ax = plt.gca() + box = ax.get_position() + ax.set_position([box.x0, box.y0, box.width, box.height * 0.8]) + ax.legend(loc='upper center', handles=patches, ncol=2) + + # Save figure + if not os.path.exists(out_dir): + os.makedirs(out_dir) + out_name = fig_set['out_name'] + fig.savefig( + f'{out_dir}/{out_name}_bbox_wh.jpg', + bbox_inches='tight', + pad_inches=0.1) # Save Image + plt.close() + print(f'End and save in {out_dir}/{out_name}_bbox_wh.jpg') + + +def show_bbox_wh_ratio(out_dir, fig_set, class_name, class_bbox_ratio): + """Display the distribution map of category and bbox instance width and + height ratio.""" + print('\n\nDrawing bbox_wh_ratio figure:') + # Draw designs + fig, ax = plt.subplots( + figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300) + + # Set the position of the map and label on the x-axis + positions = list(range(0, 6 * len(class_name), 6)) + ax.violinplot( + class_bbox_ratio, + positions, + showmeans=True, + showmedians=True, + widths=5) + + # Draw titles, labels and so on + plt.xticks(rotation=fig_set['xticks_angle']) + plt.ylabel('Ratio of width to height of bbox') + plt.xlabel('Class name') + plt.title('Width to height ratio distribution of class and bbox instances') + + # Draw the max, min and median of wide data in violin chart + for i in range(len(class_bbox_ratio)): + plt.text( + positions[i], + median(class_bbox_ratio[i]), + f'{"%.2f" % median(class_bbox_ratio[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions[i], + max(class_bbox_ratio[i]), + f'{"%.2f" % max(class_bbox_ratio[i])}', + ha='center', + fontsize=fig_set['fontsize']) + plt.text( + positions[i], + min(class_bbox_ratio[i]), + f'{"%.2f" % min(class_bbox_ratio[i])}', + ha='center', + fontsize=fig_set['fontsize']) + + # Set the position of the map and label on the x-axis + plt.setp(ax, xticks=positions, xticklabels=class_name) + + # Save figure + if not os.path.exists(out_dir): + os.makedirs(out_dir) + out_name = fig_set['out_name'] + fig.savefig( + f'{out_dir}/{out_name}_bbox_ratio.jpg', + bbox_inches='tight', + pad_inches=0.1) # Save Image + plt.close() + print(f'End and save in {out_dir}/{out_name}_bbox_ratio.jpg') + + +def show_bbox_area(out_dir, fig_set, area_rule, class_name, bbox_area_num): + """Display the distribution map of category and bbox instance area based on + the rules of large, medium and small objects.""" + print('\n\nDrawing 
bbox_area figure:') + # Set the direct distance of each label and the width of each histogram + # Set the required labels and colors + positions = np.arange(0, 2 * len(class_name), 2) + width = 0.4 + labels = ['Small', 'Mediun', 'Large', 'Huge'] + colors = ['#438675', '#F7B469', '#6BA6DA', '#913221'] + + # Draw designs + fig = plt.figure( + figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300) + for i in range(len(area_rule) - 1): + area_num = [bbox_area_num[idx][i] for idx in range(len(class_name))] + plt.bar( + positions + width * i, + area_num, + width, + label=labels[i], + color=colors[i]) + for idx, (x, y) in enumerate(zip(positions.tolist(), area_num)): + plt.text( + x + width * i, + y, + y, + ha='center', + fontsize=fig_set['fontsize'] - 1) + + # Draw titles, labels and so on + plt.xticks(rotation=fig_set['xticks_angle']) + plt.xticks(positions + width * ((len(area_rule) - 2) / 2), class_name) + plt.ylabel('Class Area') + plt.xlabel('Class Name') + plt.title( + 'Area and number of large, medium and small objects of each class') + + # Set and Draw Legend + patches = [ + mpatches.Patch(color=colors[i], label=f'{labels[i]:s}') + for i in range(len(area_rule) - 1) + ] + ax = plt.gca() + box = ax.get_position() + ax.set_position([box.x0, box.y0, box.width, box.height * 0.8]) + ax.legend(loc='upper center', handles=patches, ncol=len(area_rule) - 1) + + # Save figure + if not os.path.exists(out_dir): + os.makedirs(out_dir) + out_name = fig_set['out_name'] + fig.savefig( + f'{out_dir}/{out_name}_bbox_area.jpg', + bbox_inches='tight', + pad_inches=0.1) # Save Image + plt.close() + print(f'End and save in {out_dir}/{out_name}_bbox_area.jpg') + + +def show_class_list(classes, class_num): + """Print the data of the class obtained by the current run.""" + print('\n\nThe information obtained is as follows:') + class_info = PrettyTable() + class_info.title = 'Information of dataset class' + # List Print Settings + # If the quantity is too large, 25 rows will be displayed in each column + if len(classes) < 25: + class_info.add_column('Class name', classes) + class_info.add_column('Bbox num', class_num) + elif len(classes) % 25 != 0 and len(classes) > 25: + col_num = int(len(classes) / 25) + 1 + class_nums = class_num.tolist() + class_name_list = list(classes) + for i in range(0, (col_num * 25) - len(classes)): + class_name_list.append('') + class_nums.append('') + for i in range(0, len(class_name_list), 25): + class_info.add_column('Class name', class_name_list[i:i + 25]) + class_info.add_column('Bbox num', class_nums[i:i + 25]) + + # Align display data to the left + class_info.align['Class name'] = 'l' + class_info.align['Bbox num'] = 'l' + print(class_info) + + +def show_data_list(args, area_rule): + """Print run setup information.""" + print('\n\nPrint current running information:') + data_info = PrettyTable() + data_info.title = 'Dataset information' + # Print the corresponding information according to the settings + if args.val_dataset is False: + data_info.add_column('Dataset type', ['train_dataset']) + elif args.val_dataset is True: + data_info.add_column('Dataset type', ['val_dataset']) + if args.class_name is None: + data_info.add_column('Class name', ['All classes']) + else: + data_info.add_column('Class name', [args.class_name]) + if args.func is None: + data_info.add_column('Function', ['All function']) + else: + data_info.add_column('Function', [args.func]) + data_info.add_column('Area rule', [area_rule]) + + print(data_info) + + +def main(): + args = parse_args() + cfg = 
Config.fromfile(args.config) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + def replace_pipeline_to_none(cfg): + """Recursively iterate over all dataset(or datasets) and set their + pipelines to none.Datasets are mean ConcatDataset. + + Recursively terminates only when all dataset(or datasets) have been + traversed + """ + + if cfg.get('dataset', None) is None and cfg.get('datasets', + None) is None: + return + dataset = cfg.dataset if cfg.get('dataset', None) else cfg.datasets + if isinstance(dataset, list): + for item in dataset: + item.pipeline = None + elif dataset.get('pipeline', None): + dataset.pipeline = None + else: + replace_pipeline_to_none(dataset) + + # 1.Build Dataset + if args.val_dataset is False: + replace_pipeline_to_none(cfg.train_dataloader) + dataset = DATASETS.build(cfg.train_dataloader.dataset) + else: + replace_pipeline_to_none(cfg.val_dataloader) + dataset = DATASETS.build(cfg.val_dataloader.dataset) + + # 2.Prepare data + # Drawing settings + fig_all_set = { + 'figsize': [35, 18], + 'fontsize': int(10 - 0.08 * len(dataset.metainfo['classes'])), + 'xticks_angle': 70, + 'out_name': cfg.dataset_type + } + fig_one_set = { + 'figsize': [15, 10], + 'fontsize': 10, + 'xticks_angle': 0, + 'out_name': args.class_name + } + + # Call the category name and save address + if args.class_name is None: + classes = dataset.metainfo['classes'] + classes_idx = [i for i in range(len(classes))] + fig_set = fig_all_set + elif args.class_name in dataset.metainfo['classes']: + classes = [args.class_name] + classes_idx = [dataset.metainfo['classes'].index(args.class_name)] + fig_set = fig_one_set + else: + data_classes = dataset.metainfo['classes'] + show_data_classes(data_classes) + raise RuntimeError(f'Expected args.class_name to be one of the list,' + f'but got "{args.class_name}"') + + # Building Area Rules + if args.area_rule is None: + area_rule = [0, 32, 96, 1e5] + elif args.area_rule and len(args.area_rule) <= 3: + area_rules = [0] + args.area_rule + [1e5] + area_rule = sorted(area_rules) + else: + raise RuntimeError( + f'Expected the "{args.area_rule}" to be e.g. 
30 60 120, ' + 'and no more than three numbers.') + + # Build arrays or lists to store data for each category + class_num = np.zeros((len(classes), ), dtype=np.int64) + class_bbox = [[] for _ in classes] + class_name = [] + class_bbox_w = [] + class_bbox_h = [] + class_bbox_ratio = [] + bbox_area_num = [] + + show_data_list(args, area_rule) + # Get the quantity and bbox data corresponding to each category + print('\nRead the information of each picture in the dataset:') + progress_bar = ProgressBar(len(dataset)) + for index in range(len(dataset)): + for instance in dataset[index]['instances']: + if instance[ + 'bbox_label'] in classes_idx and args.class_name is None: + class_num[instance['bbox_label']] += 1 + class_bbox[instance['bbox_label']].append(instance['bbox']) + elif instance['bbox_label'] in classes_idx and args.class_name: + class_num[0] += 1 + class_bbox[0].append(instance['bbox']) + progress_bar.update() + show_class_list(classes, class_num) + # Get the width, height and area of bbox corresponding to each category + print('\nRead bbox information in each class:') + progress_bar_classes = ProgressBar(len(classes)) + for idx, (classes, classes_idx) in enumerate(zip(classes, classes_idx)): + bbox = np.array(class_bbox[idx]) + bbox_area_nums = np.zeros((len(area_rule) - 1, ), dtype=np.int64) + if len(bbox) > 0: + bbox_wh = bbox[:, 2:4] - bbox[:, 0:2] + bbox_ratio = bbox_wh[:, 0] / bbox_wh[:, 1] + bbox_area = bbox_wh[:, 0] * bbox_wh[:, 1] + class_bbox_w.append(bbox_wh[:, 0].tolist()) + class_bbox_h.append(bbox_wh[:, 1].tolist()) + class_bbox_ratio.append(bbox_ratio.tolist()) + + # The area rule, there is an section between two numbers + for i in range(len(area_rule) - 1): + bbox_area_nums[i] = np.logical_and( + bbox_area >= area_rule[i]**2, + bbox_area < area_rule[i + 1]**2).sum() + elif len(bbox) == 0: + class_bbox_w.append([0]) + class_bbox_h.append([0]) + class_bbox_ratio.append([0]) + + class_name.append(classes) + bbox_area_num.append(bbox_area_nums.tolist()) + progress_bar_classes.update() + + # 3.draw Dataset Information + if args.func is None: + show_bbox_num(cfg, args.out_dir, fig_set, class_name, class_num) + show_bbox_wh(args.out_dir, fig_set, class_bbox_w, class_bbox_h, + class_name) + show_bbox_wh_ratio(args.out_dir, fig_set, class_name, class_bbox_ratio) + show_bbox_area(args.out_dir, fig_set, area_rule, class_name, + bbox_area_num) + elif args.func == 'show_bbox_num': + show_bbox_num(cfg, args.out_dir, fig_set, class_name, class_num) + elif args.func == 'show_bbox_wh': + show_bbox_wh(args.out_dir, fig_set, class_bbox_w, class_bbox_h, + class_name) + elif args.func == 'show_bbox_wh_ratio': + show_bbox_wh_ratio(args.out_dir, fig_set, class_name, class_bbox_ratio) + elif args.func == 'show_bbox_area': + show_bbox_area(args.out_dir, fig_set, area_rule, class_name, + bbox_area_num) + else: + raise RuntimeError( + 'Please enter the correct func name, e.g., show_bbox_num') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/get_flops.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/get_flops.py new file mode 100644 index 0000000000000000000000000000000000000000..965660f7194de231770537d7f80e38f41876df56 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/get_flops.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
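+# A minimal usage sketch for this FLOPs counter; the config path below is an
+# illustrative assumption, substitute whichever model config you actually use:
+#
+#   python tools/analysis_tools/get_flops.py \
+#       configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \
+#       --shape 640 640
+#
+# The script builds the detector, pushes one randomly generated image of the
+# requested shape through it, and reports FLOPs and parameter counts via
+# mmengine.analysis.get_model_complexity_info.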
+import argparse +import tempfile +from pathlib import Path + +import torch +from mmdet.registry import MODELS +from mmengine.analysis import get_model_complexity_info +from mmengine.config import Config, DictAction +from mmengine.logging import MMLogger +from mmengine.model import revert_sync_batchnorm +from mmengine.registry import init_default_scope + +from mmyolo.utils import switch_to_deploy + + +def parse_args(): + parser = argparse.ArgumentParser(description='Get a detector flops') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[640, 640], + help='input image size') + parser.add_argument( + '--show-arch', + action='store_true', + help='whether return the statistics in the form of network layers') + parser.add_argument( + '--not-show-table', + action='store_true', + help='whether return the statistics in the form of table'), + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + return parser.parse_args() + + +def inference(args, logger): + config_name = Path(args.config) + if not config_name.exists(): + logger.error(f'{config_name} not found.') + + cfg = Config.fromfile(args.config) + cfg.work_dir = tempfile.TemporaryDirectory().name + cfg.log_level = 'WARN' + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + if len(args.shape) == 1: + h = w = args.shape[0] + elif len(args.shape) == 2: + h, w = args.shape + else: + raise ValueError('invalid input shape') + + # model + model = MODELS.build(cfg.model) + if torch.cuda.is_available(): + model.cuda() + model = revert_sync_batchnorm(model) + model.eval() + switch_to_deploy(model) + + # input tensor + # automatically generate a input tensor with the given input_shape. 
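+    # A single random 3 x H x W tensor stands in for one RGB image; the
+    # model's data_preprocessor then batches, normalizes and pads it, so the
+    # complexity is measured on the padded shape actually seen by the network
+    # (reported later if it differs from the requested shape).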
+ data_batch = {'inputs': [torch.rand(3, h, w)], 'batch_samples': [None]} + data = model.data_preprocessor(data_batch) + result = {'ori_shape': (h, w), 'pad_shape': data['inputs'].shape[-2:]} + outputs = get_model_complexity_info( + model, + input_shape=None, + inputs=data['inputs'], # the input tensor of the model + show_table=not args.not_show_table, # show the complexity table + show_arch=args.show_arch) # show the complexity arch + + result['flops'] = outputs['flops_str'] + result['params'] = outputs['params_str'] + result['out_table'] = outputs['out_table'] + result['out_arch'] = outputs['out_arch'] + + return result + + +def main(): + args = parse_args() + logger = MMLogger.get_instance(name='MMLogger') + result = inference(args, logger) + + split_line = '=' * 30 + + ori_shape = result['ori_shape'] + pad_shape = result['pad_shape'] + flops = result['flops'] + params = result['params'] + + print(result['out_table']) # print related information by table + print(result['out_arch']) # print related information by network layers + + if pad_shape != ori_shape: + print(f'{split_line}\nUse size divisor set input shape ' + f'from {ori_shape} to {pad_shape}') + + print(f'{split_line}\n' + f'Input shape: {pad_shape}\nModel Flops: {flops}\n' + f'Model Parameters: {params}\n{split_line}') + print('!!!Please be cautious if you use the results in papers. ' + 'You may need to check if all ops are supported and verify ' + 'that the flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/optimize_anchors.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/optimize_anchors.py new file mode 100644 index 0000000000000000000000000000000000000000..34d4d067a6470a610b53868f18203827676892a2 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/optimize_anchors.py @@ -0,0 +1,647 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Optimize anchor settings on a specific dataset. + +This script provides three methods to optimize YOLO anchors including k-means +anchor cluster, differential evolution and v5-k-means. You can use +``--algorithm k-means``, ``--algorithm differential_evolution`` and +``--algorithm v5-k-means`` to switch those methods. 
+ +Example: + Use k-means anchor cluster:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --out-dir ${OUT_DIR} + + Use differential evolution to optimize anchors:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm differential_evolution \ + --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --out-dir ${OUT_DIR} + + Use v5-k-means to optimize anchors:: + + python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ + --algorithm v5-k-means \ + --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ + --prior_match_thr ${PRIOR_MATCH_THR} \ + --out-dir ${OUT_DIR} +""" +import argparse +import os.path as osp +import random +from typing import Tuple + +import numpy as np +import torch +from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps, + bbox_xyxy_to_cxcywh) +from mmdet.utils import replace_cfg_vals, update_data_root +from mmengine.config import Config +from mmengine.fileio import dump +from mmengine.logging import MMLogger +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar +from scipy.optimize import differential_evolution +from torch import Tensor + +from mmyolo.registry import DATASETS + +try: + from scipy.cluster.vq import kmeans +except ImportError: + kmeans = None + + +def parse_args(): + parser = argparse.ArgumentParser(description='Optimize anchor parameters.') + parser.add_argument('config', help='Train config file path.') + parser.add_argument( + '--input-shape', + type=int, + nargs='+', + default=[640, 640], + help='input image size, represent [width, height]') + parser.add_argument( + '--algorithm', + default='DE', + help='Algorithm used for anchor optimizing.' + 'Support k-means and differential_evolution for YOLO,' + 'and v5-k-means is special for YOLOV5.') + parser.add_argument( + '--iters', + default=1000, + type=int, + help='Maximum iterations for optimizer.') + parser.add_argument( + '--prior-match-thr', + default=4.0, + type=float, + help='anchor-label `gt_filter_sizes` ratio threshold ' + 'hyperparameter used for training, default=4.0, this ' + 'parameter is unique to v5-k-means') + parser.add_argument( + '--mutation-args', + type=float, + nargs='+', + default=[0.9, 0.1], + help='paramter of anchor optimize method genetic algorithm, ' + 'represent [prob, sigma], this parameter is unique to v5-k-means') + parser.add_argument( + '--augment-args', + type=float, + nargs='+', + default=[0.9, 1.1], + help='scale factor of box size augment when metric box and anchor, ' + 'represent [min, max], this parameter is unique to v5-k-means') + parser.add_argument( + '--device', default='cuda:0', help='Device used for calculating.') + parser.add_argument( + '--out-dir', + default=None, + type=str, + help='Path to save anchor optimize result.') + + args = parser.parse_args() + return args + + +class BaseAnchorOptimizer: + """Base class for anchor optimizer. + + Args: + dataset (obj:`Dataset`): Dataset object. + input_shape (list[int]): Input image shape of the model. + Format in [width, height]. + num_anchor_per_level (list[int]) : Number of anchors for each level. + logger (obj:`logging.Logger`): The logger for logging. + device (str, optional): Device used for calculating. + Default: 'cuda:0' + out_dir (str, optional): Path to save anchor optimize result. 
+ Default: None + """ + + def __init__(self, + dataset, + input_shape, + num_anchor_per_level, + logger, + device='cuda:0', + out_dir=None): + self.dataset = dataset + self.input_shape = input_shape + self.num_anchor_per_level = num_anchor_per_level + self.num_anchors = sum(num_anchor_per_level) + self.logger = logger + self.device = device + self.out_dir = out_dir + bbox_whs, img_shapes = self.get_whs_and_shapes() + ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) + + # resize to input shape + self.bbox_whs = bbox_whs / ratios + + def get_whs_and_shapes(self): + """Get widths and heights of bboxes and shapes of images. + + Returns: + tuple[np.ndarray]: Array of bbox shapes and array of image + shapes with shape (num_bboxes, 2) in [width, height] format. + """ + self.logger.info('Collecting bboxes from annotation...') + bbox_whs = [] + img_shapes = [] + prog_bar = ProgressBar(len(self.dataset)) + for idx in range(len(self.dataset)): + data_info = self.dataset.get_data_info(idx) + img_shape = np.array([data_info['width'], data_info['height']]) + gt_instances = data_info['instances'] + for instance in gt_instances: + bbox = np.array(instance['bbox']) + gt_filter_sizes = bbox[2:4] - bbox[0:2] + img_shapes.append(img_shape) + bbox_whs.append(gt_filter_sizes) + + prog_bar.update() + print('\n') + bbox_whs = np.array(bbox_whs) + img_shapes = np.array(img_shapes) + self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') + return bbox_whs, img_shapes + + def get_zero_center_bbox_tensor(self): + """Get a tensor of bboxes centered at (0, 0). + + Returns: + Tensor: Tensor of bboxes with shape (num_bboxes, 4) + in [xmin, ymin, xmax, ymax] format. + """ + whs = torch.from_numpy(self.bbox_whs).to( + self.device, dtype=torch.float32) + bboxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(whs), whs], dim=1)) + return bboxes + + def optimize(self): + raise NotImplementedError + + def save_result(self, anchors, path=None): + + anchor_results = [] + start = 0 + for num in self.num_anchor_per_level: + end = num + start + anchor_results.append([(round(w), round(h)) + for w, h in anchors[start:end]]) + start = end + + self.logger.info(f'Anchor optimize result:{anchor_results}') + if path: + json_path = osp.join(path, 'anchor_optimize_result.json') + dump(anchor_results, json_path) + self.logger.info(f'Result saved in {json_path}') + + +class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): + r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. + `_. + + Args: + iters (int): Maximum iterations for k-means. 
+ """ + + def __init__(self, iters, **kwargs): + + super().__init__(**kwargs) + self.iters = iters + + def optimize(self): + anchors = self.kmeans_anchors() + self.save_result(anchors, self.out_dir) + + def kmeans_anchors(self): + self.logger.info( + f'Start cluster {self.num_anchors} YOLO anchors with K-means...') + bboxes = self.get_zero_center_bbox_tensor() + cluster_center_idx = torch.randint( + 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) + + assignments = torch.zeros((bboxes.shape[0], )).to(self.device) + cluster_centers = bboxes[cluster_center_idx] + if self.num_anchors == 1: + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + prog_bar = ProgressBar(self.iters) + for i in range(self.iters): + converged, assignments = self.kmeans_expectation( + bboxes, assignments, cluster_centers) + if converged: + self.logger.info(f'K-means process has converged at iter {i}.') + break + cluster_centers = self.kmeans_maximization(bboxes, assignments, + cluster_centers) + prog_bar.update() + print('\n') + avg_iou = bbox_overlaps(bboxes, + cluster_centers).max(1)[0].mean().item() + + anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') + + return anchors + + def kmeans_maximization(self, bboxes, assignments, centers): + """Maximization part of EM algorithm(Expectation-Maximization)""" + new_centers = torch.zeros_like(centers) + for i in range(centers.shape[0]): + mask = (assignments == i) + if mask.sum(): + new_centers[i, :] = bboxes[mask].mean(0) + return new_centers + + def kmeans_expectation(self, bboxes, assignments, centers): + """Expectation part of EM algorithm(Expectation-Maximization)""" + ious = bbox_overlaps(bboxes, centers) + closest = ious.argmax(1) + converged = (closest == assignments).all() + return converged, closest + + +class YOLOV5KMeansAnchorOptimizer(BaseAnchorOptimizer): + r"""YOLOv5 anchor optimizer using shape k-means. + Code refer to `ultralytics/yolov5. + `_. + + Args: + iters (int): Maximum iterations for k-means. + prior_match_thr (float): anchor-label width height + ratio threshold hyperparameter. + """ + + def __init__(self, + iters, + prior_match_thr=4.0, + mutation_args=[0.9, 0.1], + augment_args=[0.9, 1.1], + **kwargs): + + super().__init__(**kwargs) + self.iters = iters + self.prior_match_thr = prior_match_thr + [self.mutation_prob, self.mutation_sigma] = mutation_args + [self.augment_min, self.augment_max] = augment_args + + def optimize(self): + self.logger.info( + f'Start cluster {self.num_anchors} YOLOv5 anchors with K-means...') + + bbox_whs = torch.from_numpy(self.bbox_whs).to( + self.device, dtype=torch.float32) + anchors = self.anchor_generate( + bbox_whs, + num=self.num_anchors, + img_size=self.input_shape[0], + prior_match_thr=self.prior_match_thr, + iters=self.iters) + best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors) + self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} ' + 'Best Possible Recall (BPR). ') + self.save_result(anchors.tolist(), self.out_dir) + + def anchor_generate(self, + box_size: Tensor, + num: int = 9, + img_size: int = 640, + prior_match_thr: float = 4.0, + iters: int = 1000) -> Tensor: + """cluster boxes metric with anchors. 
+ + Args: + box_size (Tensor): The size of the bxes, which shape is + (box_num, 2),the number 2 means width and height. + num (int): number of anchors. + img_size (int): image size used for training + prior_match_thr (float): width/height ratio threshold + used for training + iters (int): iterations to evolve anchors using genetic algorithm + + Returns: + anchors (Tensor): kmeans evolved anchors + """ + + thr = 1 / prior_match_thr + + # step1: filter small bbox + box_size = self._filter_box(box_size) + assert num <= len(box_size) + + # step2: init anchors + if kmeans: + try: + self.logger.info( + 'beginning init anchors with scipy kmeans method') + # sigmas for whitening + sigmas = box_size.std(0).cpu().numpy() + anchors = kmeans( + box_size.cpu().numpy() / sigmas, num, iter=30)[0] * sigmas + # kmeans may return fewer points than requested + # if width/height is insufficient or too similar + assert num == len(anchors) + except Exception: + self.logger.warning( + 'scipy kmeans method cannot get enough points ' + 'because of width/height is insufficient or too similar, ' + 'now switching strategies from kmeans to random init.') + anchors = np.sort(np.random.rand(num * 2)).reshape( + num, 2) * img_size + else: + self.logger.info( + 'cannot found scipy package, switching strategies from kmeans ' + 'to random init, you can install scipy package to ' + 'get better anchor init') + anchors = np.sort(np.random.rand(num * 2)).reshape(num, + 2) * img_size + + self.logger.info('init done, beginning evolve anchors...') + # sort small to large + anchors = torch.tensor(anchors[np.argsort(anchors.prod(1))]).to( + box_size.device, dtype=torch.float32) + + # step3: evolve anchors use Genetic Algorithm + prog_bar = ProgressBar(iters) + fitness = self._anchor_fitness(box_size, anchors, thr) + cluster_shape = anchors.shape + + for _ in range(iters): + mutate_result = np.ones(cluster_shape) + # mutate until a change occurs (prevent duplicates) + while (mutate_result == 1).all(): + # mutate_result is scale factor of anchors, between 0.3 and 3 + mutate_result = ( + (np.random.random(cluster_shape) < self.mutation_prob) * + random.random() * np.random.randn(*cluster_shape) * + self.mutation_sigma + 1).clip(0.3, 3.0) + mutate_result = torch.from_numpy(mutate_result).to(box_size.device) + new_anchors = (anchors.clone() * mutate_result).clip(min=2.0) + new_fitness = self._anchor_fitness(box_size, new_anchors, thr) + if new_fitness > fitness: + fitness = new_fitness + anchors = new_anchors.clone() + + prog_bar.update() + print('\n') + # sort small to large + anchors = anchors[torch.argsort(anchors.prod(1))] + self.logger.info(f'Anchor cluster finish. fitness = {fitness:.4f}') + + return anchors + + def anchor_metric(self, + box_size: Tensor, + anchors: Tensor, + threshold: float = 4.0) -> Tuple: + """compute boxes metric with anchors. + + Args: + box_size (Tensor): The size of the bxes, which shape + is (box_num, 2), the number 2 means width and height. + anchors (Tensor): The size of the bxes, which shape + is (anchor_num, 2), the number 2 means width and height. 
+ threshold (float): the compare threshold of ratio + + Returns: + Tuple: a tuple of metric result, best_ratio_mean and mean_matched + """ + # step1: augment scale + # According to the uniform distribution,the scaling scale between + # augment_min and augment_max is randomly generated + scale = np.random.uniform( + self.augment_min, self.augment_max, size=(box_size.shape[0], 1)) + box_size = torch.tensor( + np.array( + [l[:, ] * s for s, l in zip(scale, + box_size.cpu().numpy())])).to( + box_size.device, + dtype=torch.float32) + # step2: calculate ratio + min_ratio, best_ratio = self._metric(box_size, anchors) + mean_matched = (min_ratio > 1 / threshold).float().sum(1).mean() + best_ratio_mean = (best_ratio > 1 / threshold).float().mean() + return best_ratio_mean, mean_matched + + def _filter_box(self, box_size: Tensor) -> Tensor: + small_cnt = (box_size < 3.0).any(1).sum() + if small_cnt: + self.logger.warning( + f'Extremely small objects found: {small_cnt} ' + f'of {len(box_size)} labels are <3 pixels in size') + # filter > 2 pixels + filter_sizes = box_size[(box_size >= 2.0).any(1)] + return filter_sizes + + def _anchor_fitness(self, box_size: Tensor, anchors: Tensor, thr: float): + """mutation fitness.""" + _, best = self._metric(box_size, anchors) + return (best * (best > thr).float()).mean() + + def _metric(self, box_size: Tensor, anchors: Tensor) -> Tuple: + """compute boxes metric with anchors. + + Args: + box_size (Tensor): The size of the bxes, which shape is + (box_num, 2), the number 2 means width and height. + anchors (Tensor): The size of the bxes, which shape is + (anchor_num, 2), the number 2 means width and height. + + Returns: + Tuple: a tuple of metric result, min_ratio and best_ratio + """ + + # ratio means the (width_1/width_2 and height_1/height_2) ratio of each + # box and anchor, the ratio shape is torch.Size([box_num,anchor_num,2]) + ratio = box_size[:, None] / anchors[None] + + # min_ratio records the min ratio of each box with all anchor, + # min_ratio.shape is torch.Size([box_num,anchor_num]) + # notice: + # smaller ratio means worse shape-match between boxes and anchors + min_ratio = torch.min(ratio, 1 / ratio).min(2)[0] + + # find the best shape-match ratio for each box + # box_best_ratio.shape is torch.Size([box_num]) + best_ratio = min_ratio.max(1)[0] + + return min_ratio, best_ratio + + +class YOLODEAnchorOptimizer(BaseAnchorOptimizer): + """YOLO anchor optimizer using differential evolution algorithm. + + Args: + iters (int): Maximum iterations for k-means. + strategy (str): The differential evolution strategy to use. + Should be one of: + + - 'best1bin' + - 'best1exp' + - 'rand1exp' + - 'randtobest1exp' + - 'currenttobest1exp' + - 'best2exp' + - 'rand2exp' + - 'randtobest1bin' + - 'currenttobest1bin' + - 'best2bin' + - 'rand2bin' + - 'rand1bin' + + Default: 'best1bin'. + population_size (int): Total population size of evolution algorithm. + Default: 15. + convergence_thr (float): Tolerance for convergence, the + optimizing stops when ``np.std(pop) <= abs(convergence_thr) + + convergence_thr * np.abs(np.mean(population_energies))``, + respectively. Default: 0.0001. + mutation (tuple[float]): Range of dithering randomly changes the + mutation constant. Default: (0.5, 1). + recombination (float): Recombination constant of crossover probability. + Default: 0.7. 
+ """ + + def __init__(self, + iters, + strategy='best1bin', + population_size=15, + convergence_thr=0.0001, + mutation=(0.5, 1), + recombination=0.7, + **kwargs): + + super().__init__(**kwargs) + + self.iters = iters + self.strategy = strategy + self.population_size = population_size + self.convergence_thr = convergence_thr + self.mutation = mutation + self.recombination = recombination + + def optimize(self): + anchors = self.differential_evolution() + self.save_result(anchors, self.out_dir) + + def differential_evolution(self): + bboxes = self.get_zero_center_bbox_tensor() + + bounds = [] + for i in range(self.num_anchors): + bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) + + result = differential_evolution( + func=self.avg_iou_cost, + bounds=bounds, + args=(bboxes, ), + strategy=self.strategy, + maxiter=self.iters, + popsize=self.population_size, + tol=self.convergence_thr, + mutation=self.mutation, + recombination=self.recombination, + updating='immediate', + disp=True) + self.logger.info( + f'Anchor evolution finish. Average IOU: {1 - result.fun}') + anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] + anchors = sorted(anchors, key=lambda x: x[0] * x[1]) + return anchors + + @staticmethod + def avg_iou_cost(anchor_params, bboxes): + assert len(anchor_params) % 2 == 0 + anchor_whs = torch.tensor( + [[w, h] + for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( + bboxes.device, dtype=bboxes.dtype) + anchor_boxes = bbox_cxcywh_to_xyxy( + torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) + ious = bbox_overlaps(bboxes, anchor_boxes) + max_ious, _ = ious.max(1) + cost = 1 - max_ious.mean().item() + return cost + + +def main(): + logger = MMLogger.get_current_instance() + args = parse_args() + cfg = args.config + cfg = Config.fromfile(cfg) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + input_shape = args.input_shape + assert len(input_shape) == 2 + + anchor_type = cfg.model.bbox_head.prior_generator.type + assert anchor_type == 'mmdet.YOLOAnchorGenerator', \ + f'Only support optimize YOLOAnchor, but get {anchor_type}.' 
+ + base_sizes = cfg.model.bbox_head.prior_generator.base_sizes + num_anchor_per_level = [len(sizes) for sizes in base_sizes] + + train_data_cfg = cfg.train_dataloader + while 'dataset' in train_data_cfg: + train_data_cfg = train_data_cfg['dataset'] + dataset = DATASETS.build(train_data_cfg) + + if args.algorithm == 'k-means': + optimizer = YOLOKMeansAnchorOptimizer( + dataset=dataset, + input_shape=input_shape, + device=args.device, + num_anchor_per_level=num_anchor_per_level, + iters=args.iters, + logger=logger, + out_dir=args.out_dir) + elif args.algorithm == 'DE': + optimizer = YOLODEAnchorOptimizer( + dataset=dataset, + input_shape=input_shape, + device=args.device, + num_anchor_per_level=num_anchor_per_level, + iters=args.iters, + logger=logger, + out_dir=args.out_dir) + elif args.algorithm == 'v5-k-means': + optimizer = YOLOV5KMeansAnchorOptimizer( + dataset=dataset, + input_shape=input_shape, + device=args.device, + num_anchor_per_level=num_anchor_per_level, + iters=args.iters, + prior_match_thr=args.prior_match_thr, + mutation_args=args.mutation_args, + augment_args=args.augment_args, + logger=logger, + out_dir=args.out_dir) + else: + raise NotImplementedError( + f'Only support k-means and differential_evolution, ' + f'but get {args.algorithm}') + + optimizer.optimize() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/vis_scheduler.py b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/vis_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..8a2922d890d68e0be54925fc18c8afd43a4451f3 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/analysis_tools/vis_scheduler.py @@ -0,0 +1,295 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Hyper-parameter Scheduler Visualization. + +This tool aims to help the user to check +the hyper-parameter scheduler of the optimizer(without training), +which support the "learning rate", "momentum", and "weight_decay". + +Example: +```shell +python tools/analysis_tools/vis_scheduler.py \ + configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py \ + --dataset-size 118287 \ + --ngpus 8 \ + --out-dir ./output +``` +Modified from: https://github.com/open-mmlab/mmclassification/blob/1.x/tools/visualizations/vis_scheduler.py # noqa +""" +import argparse +import json +import os.path as osp +import re +from pathlib import Path +from unittest.mock import MagicMock + +import matplotlib.pyplot as plt +import rich +import torch.nn as nn +from mmengine.config import Config, DictAction +from mmengine.hooks import Hook +from mmengine.model import BaseModel +from mmengine.registry import init_default_scope +from mmengine.runner import Runner +from mmengine.utils.path import mkdir_or_exist +from mmengine.visualization import Visualizer +from rich.progress import BarColumn, MofNCompleteColumn, Progress, TextColumn + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Visualize a hyper-parameter scheduler') + parser.add_argument('config', help='config file path') + parser.add_argument( + '-p', + '--parameter', + type=str, + default='lr', + choices=['lr', 'momentum', 'wd'], + help='The parameter to visualize its change curve, choose from' + '"lr", "wd" and "momentum". Defaults to "lr".') + parser.add_argument( + '-d', + '--dataset-size', + type=int, + help='The size of the dataset. 
If specify, `DATASETS.build` will ' + 'be skipped and use this size as the dataset size.') + parser.add_argument( + '-n', + '--ngpus', + type=int, + default=1, + help='The number of GPUs used in training.') + parser.add_argument( + '-o', '--out-dir', type=Path, help='Path to output file') + parser.add_argument( + '--log-level', + default='WARNING', + help='The log level of the handler and logger. Defaults to ' + 'WARNING.') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--style', type=str, default='whitegrid', help='style of plt') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--window-size', + default='12*7', + help='Size of the window to display images, in format of "$W*$H".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + + return args + + +class SimpleModel(BaseModel): + """simple model that do nothing in train_step.""" + + def __init__(self): + super().__init__() + self.data_preprocessor = nn.Identity() + self.conv = nn.Conv2d(1, 1, 1) + + def forward(self, inputs, data_samples, mode='tensor'): + pass + + def train_step(self, data, optim_wrapper): + pass + + +class ParamRecordHook(Hook): + + def __init__(self, by_epoch): + super().__init__() + self.by_epoch = by_epoch + self.lr_list = [] + self.momentum_list = [] + self.wd_list = [] + self.task_id = 0 + self.progress = Progress(BarColumn(), MofNCompleteColumn(), + TextColumn('{task.description}')) + + def before_train(self, runner): + if self.by_epoch: + total = runner.train_loop.max_epochs + self.task_id = self.progress.add_task( + 'epochs', start=True, total=total) + else: + total = runner.train_loop.max_iters + self.task_id = self.progress.add_task( + 'iters', start=True, total=total) + self.progress.start() + + def after_train_epoch(self, runner): + if self.by_epoch: + self.progress.update(self.task_id, advance=1) + + # TODO: Support multiple schedulers + def after_train_iter(self, runner, batch_idx, data_batch, outputs): + if not self.by_epoch: + self.progress.update(self.task_id, advance=1) + self.lr_list.append(runner.optim_wrapper.get_lr()['lr'][0]) + self.momentum_list.append( + runner.optim_wrapper.get_momentum()['momentum'][0]) + self.wd_list.append( + runner.optim_wrapper.param_groups[0]['weight_decay']) + + def after_train(self, runner): + self.progress.stop() + + +def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True): + """Plot learning rate vs iter graph.""" + try: + import seaborn as sns + sns.set_style(args.style) + except ImportError: + pass + + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + plt.figure(figsize=(wind_w, wind_h)) + + ax: plt.Axes = plt.subplot() + ax.plot(lr_list, linewidth=1) + + if by_epoch: + ax.xaxis.tick_top() + ax.set_xlabel('Iters') + ax.xaxis.set_label_position('top') + sec_ax = ax.secondary_xaxis( + 'bottom', + functions=(lambda x: x / iters_per_epoch, + lambda y: y * iters_per_epoch)) 
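+        # The secondary x-axis at the bottom re-labels iterations as epochs:
+        # the forward mapping divides by iters_per_epoch and the inverse
+        # multiplies back, so both axes describe the same curve.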
+ sec_ax.set_xlabel('Epochs') + else: + plt.xlabel('Iters') + plt.ylabel(param_name) + + if args.title is None: + plt.title(f'{osp.basename(args.config)} {param_name} curve') + else: + plt.title(args.title) + + +def simulate_train(data_loader, cfg, by_epoch): + model = SimpleModel() + param_record_hook = ParamRecordHook(by_epoch=by_epoch) + default_hooks = dict( + param_scheduler=cfg.default_hooks['param_scheduler'], + runtime_info=None, + timer=None, + logger=None, + checkpoint=None, + sampler_seed=None, + param_record=param_record_hook) + + runner = Runner( + model=model, + work_dir=cfg.work_dir, + train_dataloader=data_loader, + train_cfg=cfg.train_cfg, + log_level=cfg.log_level, + optim_wrapper=cfg.optim_wrapper, + param_scheduler=cfg.param_scheduler, + default_scope=cfg.default_scope, + default_hooks=default_hooks, + visualizer=MagicMock(spec=Visualizer), + custom_hooks=cfg.get('custom_hooks', None)) + + runner.train() + + param_dict = dict( + lr=param_record_hook.lr_list, + momentum=param_record_hook.momentum_list, + wd=param_record_hook.wd_list) + + return param_dict + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + if cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.log_level = args.log_level + + init_default_scope(cfg.get('default_scope', 'mmyolo')) + + # init logger + print('Param_scheduler :') + rich.print_json(json.dumps(cfg.param_scheduler)) + + # prepare data loader + batch_size = cfg.train_dataloader.batch_size * args.ngpus + + if 'by_epoch' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('by_epoch') + elif 'type' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('type') == 'EpochBasedTrainLoop' + else: + raise ValueError('please set `train_cfg`.') + + if args.dataset_size is None and by_epoch: + from mmyolo.registry import DATASETS + dataset_size = len(DATASETS.build(cfg.train_dataloader.dataset)) + else: + dataset_size = args.dataset_size or batch_size + + class FakeDataloader(list): + dataset = MagicMock(metainfo=None) + + data_loader = FakeDataloader(range(dataset_size // batch_size)) + dataset_info = ( + f'\nDataset infos:' + f'\n - Dataset size: {dataset_size}' + f'\n - Batch size per GPU: {cfg.train_dataloader.batch_size}' + f'\n - Number of GPUs: {args.ngpus}' + f'\n - Total batch size: {batch_size}') + if by_epoch: + dataset_info += f'\n - Iterations per epoch: {len(data_loader)}' + rich.print(dataset_info + '\n') + + # simulation training process + param_dict = simulate_train(data_loader, cfg, by_epoch) + param_list = param_dict[args.parameter] + + if args.parameter == 'lr': + param_name = 'Learning Rate' + elif args.parameter == 'momentum': + param_name = 'Momentum' + else: + param_name = 'Weight Decay' + plot_curve(param_list, args, param_name, len(data_loader), by_epoch) + + if args.out_dir: + # make dir for output + mkdir_or_exist(args.out_dir) + + # save the graph + out_file = osp.join( + args.out_dir, f'{osp.basename(args.config)}-{args.parameter}.jpg') + plt.savefig(out_file) + print(f'\nThe {param_name} graph is saved at {out_file}') + + if not args.not_show: + plt.show() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/balloon2coco.py b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/balloon2coco.py new file mode 100644 index 
0000000000000000000000000000000000000000..65eb660cb09f850bafb1e743ff840b14200fa975 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/balloon2coco.py @@ -0,0 +1,58 @@ +import os.path as osp + +import mmcv +import mmengine + + +def convert_balloon_to_coco(ann_file, out_file, image_prefix): + + data_infos = mmengine.load(ann_file) + + annotations = [] + images = [] + obj_count = 0 + for idx, v in enumerate(mmengine.track_iter_progress(data_infos.values())): + filename = v['filename'] + img_path = osp.join(image_prefix, filename) + height, width = mmcv.imread(img_path).shape[:2] + + images.append( + dict(id=idx, file_name=filename, height=height, width=width)) + + for _, obj in v['regions'].items(): + assert not obj['region_attributes'] + obj = obj['shape_attributes'] + px = obj['all_points_x'] + py = obj['all_points_y'] + poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)] + poly = [p for x in poly for p in x] + + x_min, y_min, x_max, y_max = (min(px), min(py), max(px), max(py)) + + data_anno = dict( + image_id=idx, + id=obj_count, + category_id=0, + bbox=[x_min, y_min, x_max - x_min, y_max - y_min], + area=(x_max - x_min) * (y_max - y_min), + segmentation=[poly], + iscrowd=0) + annotations.append(data_anno) + obj_count += 1 + + coco_format_json = dict( + images=images, + annotations=annotations, + categories=[{ + 'id': 0, + 'name': 'balloon' + }]) + mmengine.dump(coco_format_json, out_file) + + +if __name__ == '__main__': + + convert_balloon_to_coco('data/balloon/train/via_region_data.json', + 'data/balloon/train.json', 'data/balloon/train/') + convert_balloon_to_coco('data/balloon/val/via_region_data.json', + 'data/balloon/val.json', 'data/balloon/val/') diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/README.md b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a166e2793a0aeb0d08d9e19e7bd5abfd8d8240e5 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/README.md @@ -0,0 +1,3 @@ +# Preparing DOTA Dataset + +Please refer to [Dataset preparation and description](../../../docs/en/recommended_topics/dataset_preparation.md) diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/dota_split.py b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/dota_split.py new file mode 100644 index 0000000000000000000000000000000000000000..0418e9d3c9a7c87a04b825c152f4784f2a7150fa --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/dota_split.py @@ -0,0 +1,603 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
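+#
+# Invocation sketch (paths and values below are illustrative assumptions):
+#
+#   python tools/dataset_converters/dota/dota_split.py \
+#       dota_split_config.py /path/to/DOTA ./data/split_ss_dota \
+#       --phase trainval test --nproc 8
+#
+# where the split config is a plain mmengine Config file defining the fields
+# consumed below, for example:
+#
+#   patch_sizes = [1024]
+#   patch_overlap_sizes = [200]
+#   img_resize_ratio = [1.0]
+#   min_img_ratio = 0.6
+#   iof_thr = 0.7
+#   padding = True
+#   padding_value = [104, 116, 124]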
+# Reference: https://github.com/jbwang1997/BboxToolkit + +import argparse +import codecs +import datetime +import itertools +import os +import os.path as osp +import time +from functools import partial, reduce +from math import ceil +from multiprocessing import Manager, Pool +from typing import List, Sequence + +import cv2 +import numpy as np +from mmengine import Config, MMLogger, mkdir_or_exist, print_log +from PIL import Image + +Image.MAX_IMAGE_PIXELS = None + +try: + import shapely.geometry as shgeo +except ImportError: + raise ImportError('Please run "pip install shapely" ' + 'to install shapely first.') + +PHASE_REQUIRE_SETS = dict( + trainval=['train', 'val'], + train=[ + 'train', + ], + val=[ + 'val', + ], + test=[ + 'test', + ], +) + + +def parse_args(): + """Parse arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument( + 'split_config', type=str, help='The split config for image slicing.') + parser.add_argument( + 'data_root', type=str, help='Root dir of DOTA dataset.') + parser.add_argument( + 'out_dir', type=str, help='Output dir for split result.') + parser.add_argument( + '--ann-subdir', + default='labelTxt-v1.0', + type=str, + help='output directory') + parser.add_argument( + '--phase', + '-p', + nargs='+', + default=['trainval', 'test'], + type=str, + choices=['trainval', 'train', 'val', 'test'], + help='Phase of the data set to be prepared.') + parser.add_argument( + '--nproc', default=8, type=int, help='Number of processes.') + parser.add_argument( + '--save-ext', + default=None, + type=str, + help='Extension of the saved image.') + parser.add_argument( + '--overwrite', + action='store_true', + help='Whether to allow overwrite if annotation folder exist.') + args = parser.parse_args() + + assert args.split_config is not None, "argument split_config can't be None" + split_cfg = Config.fromfile(args.split_config) + + # assert arguments + assert args.data_root is not None, "argument data_root can't be None" + if args.save_ext: + assert args.save_ext in ['png', 'jpg', 'bmp', 'tif'] + + assert len(split_cfg.patch_sizes) == len(split_cfg.patch_overlap_sizes) + assert 0 <= split_cfg.iof_thr <= 1 + if split_cfg.get('padding'): + padding_value = split_cfg.get('padding_value') + assert padding_value is not None, \ + "padding_value can't be None when padding is True." + padding_value = padding_value[0] \ + if len(padding_value) == 1 else padding_value + split_cfg.padding_value = padding_value + else: + split_cfg.padding = False + split_cfg.padding_value = None + return args, split_cfg + + +def _make_dirs(out_dir: str, phase: List[str], allow_overwrite: bool): + """Prepare folder for DOTA dataset. + + Args: + out_dir (str): The output dir for DOTA split. + phase (List[str]): The phase to prepare. + allow_overwrite (bool): Whether to allow overwrite when folder exist. 
+ """ + logger = MMLogger.get_current_instance() + for p in phase: + phase_dir = osp.join(out_dir, p) + if not allow_overwrite: + assert not osp.exists(phase_dir), \ + f'{osp.join(phase_dir)} already exists,' \ + 'If you want to ignore existing files, set --overwrite' + else: + if osp.exists(phase_dir): + logger.warning( + f'{p} set in {osp.join(phase_dir)} will be overwritten') + mkdir_or_exist(phase_dir) + mkdir_or_exist(osp.join(phase_dir, 'images')) + mkdir_or_exist(osp.join(phase_dir, 'annfiles')) + + +def load_original_annotations(data_root: str, + ann_subdir: str = 'labelTxt-v1.0', + phase: str = 'train', + nproc: int = 8): + img_dir = osp.join(data_root, phase, 'images') + assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!' + + if phase == 'test': + ann_dir = None + else: + ann_dir = osp.join(data_root, phase, ann_subdir, 'labelTxt') + assert osp.isdir(ann_dir), f'The {ann_dir} is not an existing dir!' + + _load_func = partial(_load_dota_single, img_dir=img_dir, ann_dir=ann_dir) + if nproc > 1: + pool = Pool(nproc) + contents = pool.map(_load_func, os.listdir(img_dir)) + pool.close() + else: + contents = list(map(_load_func, os.listdir(img_dir))) + infos = [c for c in contents if c is not None] + return infos + + +def _load_dota_single(imgfile: str, img_dir: str, ann_dir: str): + """Load DOTA's single image. + + Args: + imgfile (str): Filename of single image. + img_dir (str): Path of images. + ann_dir (str): Path of annotations. + + Returns: + result (dict): Information of a single image. + + - ``id``: Image id. + - ``filename``: Filename of single image. + - ``filepath``: Filepath of single image. + - ``width``: The width of image. + - ``height``: The height of image. + - ``annotations``: The annotation of single image. + - ``gsd``: The ground sampling distance. + """ + img_id, ext = osp.splitext(imgfile) + if ext not in ['.jpg', '.JPG', '.png', '.tif', '.bmp']: + return None + + imgpath = osp.join(img_dir, imgfile) + size = Image.open(imgpath).size + txtfile = None if ann_dir is None else osp.join(ann_dir, img_id + '.txt') + content = _load_dota_txt(txtfile) + + content.update( + dict( + width=size[0], + height=size[1], + filename=imgfile, + filepath=imgpath, + id=img_id)) + return content + + +def _load_dota_txt(txtfile): + """Load DOTA's txt annotation. + + Args: + txtfile (str): Filename of single Dota txt annotation. + + Returns: + result (dict): Annotation of single image. + + - ``annotations``: The annotation of single image. + - ``gsd``: The ground sampling distance. + """ + gsd, bboxes, labels, diffs = None, [], [], [] + if txtfile is None: + pass + elif not osp.isfile(txtfile): + print(f"Can't find {txtfile}, treated as empty txtfile") + else: + with open(txtfile) as f: + for line in f: + if line.startswith('gsd'): + num = line.split(':')[-1] + try: + gsd = float(num) + except ValueError: + gsd = None + continue + + items = line.split(' ') + if len(items) >= 9: + bboxes.append([float(i) for i in items[:8]]) + labels.append(items[8]) + diffs.append(int(items[9]) if len(items) == 10 else 0) + + bboxes = np.array(bboxes, dtype=np.float32) if bboxes else \ + np.zeros((0, 8), dtype=np.float32) + diffs = np.array(diffs, dtype=np.int64) if diffs else \ + np.zeros((0,), dtype=np.int64) + ann = dict(bboxes=bboxes, labels=labels, diffs=diffs) + return dict(gsd=gsd, annotations=ann) + + +def poly2hbb(polys): + """Convert polygons to horizontal bboxes. + + Args: + polys (np.array): Polygons with shape (N, 8) + + Returns: + np.array: Horizontal bboxes. 
+ """ + shape = polys.shape + polys = polys.reshape(*shape[:-1], shape[-1] // 2, 2) + lt_point = np.min(polys, axis=-2) + rb_point = np.max(polys, axis=-2) + return np.concatenate([lt_point, rb_point], axis=-1) + + +def get_sliding_window(info, patch_settings, img_rate_thr): + """Get sliding windows. + + Args: + info (dict): Dict of image's width and height. + patch_settings (list): List of patch settings, + each in format (patch_size, patch_overlap). + img_rate_thr (float): Threshold of window area divided by image area. + + Returns: + list[np.array]: Information of valid windows. + """ + eps = 0.01 + windows = [] + width, height = info['width'], info['height'] + for (size, gap) in patch_settings: + assert size > gap, f'invaild size gap pair [{size} {gap}]' + step = size - gap + + x_num = 1 if width <= size else ceil((width - size) / step + 1) + x_start = [step * i for i in range(x_num)] + if len(x_start) > 1 and x_start[-1] + size > width: + x_start[-1] = width - size + + y_num = 1 if height <= size else ceil((height - size) / step + 1) + y_start = [step * i for i in range(y_num)] + if len(y_start) > 1 and y_start[-1] + size > height: + y_start[-1] = height - size + + start = np.array( + list(itertools.product(x_start, y_start)), dtype=np.int64) + stop = start + size + windows.append(np.concatenate([start, stop], axis=1)) + windows = np.concatenate(windows, axis=0) + + img_in_wins = windows.copy() + img_in_wins[:, 0::2] = np.clip(img_in_wins[:, 0::2], 0, width) + img_in_wins[:, 1::2] = np.clip(img_in_wins[:, 1::2], 0, height) + img_areas = (img_in_wins[:, 2] - img_in_wins[:, 0]) * \ + (img_in_wins[:, 3] - img_in_wins[:, 1]) + win_areas = (windows[:, 2] - windows[:, 0]) * \ + (windows[:, 3] - windows[:, 1]) + img_rates = img_areas / win_areas + if not (img_rates > img_rate_thr).any(): + max_rate = img_rates.max() + img_rates[abs(img_rates - max_rate) < eps] = 1 + return windows[img_rates > img_rate_thr] + + +def get_window_annotation(info, windows, iof_thr): + """Get annotation by sliding windows. + + Args: + info (dict): Dict of bbox annotations. + windows (np.array): information of sliding windows. + iof_thr (float): Threshold of overlaps between bbox and window. + + Returns: + list[dict]: List of bbox annotations of every window. + """ + bboxes = info['annotations']['bboxes'] + iofs = ann_window_iof(bboxes, windows) + + window_anns = [] + for i in range(windows.shape[0]): + win_iofs = iofs[:, i] + pos_inds = np.nonzero(win_iofs >= iof_thr)[0].tolist() + + win_ann = dict() + for k, v in info['annotations'].items(): + try: + win_ann[k] = v[pos_inds] + except TypeError: + win_ann[k] = [v[i] for i in pos_inds] + win_ann['trunc'] = win_iofs[pos_inds] < 1 + window_anns.append(win_ann) + return window_anns + + +def ann_window_iof(anns, window, eps=1e-6): + """Compute overlaps (iof) between annotations (poly) and window (hbox). + + Args: + anns (np.array): quadri annotations with shape (n, 8). + window (np.array): slide windows with shape (m, 4). + eps (float, optional): Defaults to 1e-6. + + Returns: + np.array: iof between box and window. 
+ """ + rows = anns.shape[0] + cols = window.shape[0] + + if rows * cols == 0: + return np.zeros((rows, cols), dtype=np.float32) + + hbboxes_ann = poly2hbb(anns) + hbboxes_win = window + hbboxes_ann = hbboxes_ann[:, None, :] + lt = np.maximum(hbboxes_ann[..., :2], hbboxes_win[..., :2]) + rb = np.minimum(hbboxes_ann[..., 2:], hbboxes_win[..., 2:]) + wh = np.clip(rb - lt, 0, np.inf) + h_overlaps = wh[..., 0] * wh[..., 1] + + l, t, r, b = (window[..., i] for i in range(4)) + polys_win = np.stack([l, t, r, t, r, b, l, b], axis=-1) + sg_polys_ann = [shgeo.Polygon(p) for p in anns.reshape(rows, -1, 2)] + sg_polys_win = [shgeo.Polygon(p) for p in polys_win.reshape(cols, -1, 2)] + overlaps = np.zeros(h_overlaps.shape) + for p in zip(*np.nonzero(h_overlaps)): + overlaps[p] = sg_polys_ann[p[0]].intersection(sg_polys_win[p[-1]]).area + unions = np.array([p.area for p in sg_polys_ann], dtype=np.float32) + unions = unions[..., None] + + unions = np.clip(unions, eps, np.inf) + outputs = overlaps / unions + if outputs.ndim == 1: + outputs = outputs[..., None] + return outputs + + +def crop_and_save_img(info, windows, window_anns, padding, padding_value, + save_dir, anno_dir, img_ext): + """Crop the image and save. + + Args: + info (dict): Image's information. + windows (np.array): information of sliding windows. + window_anns (list[dict]): List of bbox annotations of every window. + padding (bool): If True, with padding. + padding_value (tuple[int|float]): Padding value. + save_dir (str): Save filename. + anno_dir (str): Annotation filename. + img_ext (str): Picture suffix. + + Returns: + list[dict]: Information of paths. + """ + img = cv2.imread(info['filepath']) + patch_infos = [] + for window, ann in zip(windows, window_anns): + patch_info = dict() + for k, v in info.items(): + if k not in [ + 'id', 'filename', 'filepath', 'width', 'height', + 'annotations' + ]: + patch_info[k] = v + + x_start, y_start, x_stop, y_stop = window.tolist() + patch_info['x_start'] = x_start + patch_info['y_start'] = y_start + patch_info['id'] = \ + info['id'] + '__' + str(x_stop - x_start) + \ + '__' + str(x_start) + '___' + str(y_start) + patch_info['ori_id'] = info['id'] + + ann['bboxes'] = shift_qbboxes(ann['bboxes'], [-x_start, -y_start]) + patch_info['ann'] = ann + + patch = img[y_start:y_stop, x_start:x_stop] + if padding: + height = y_stop - y_start + width = x_stop - x_start + if height > patch.shape[0] or width > patch.shape[1]: + padding_patch = np.empty((height, width, patch.shape[-1]), + dtype=np.uint8) + if not isinstance(padding_value, (int, float)): + assert len(padding_value) == patch.shape[-1] + padding_patch[...] = padding_value + padding_patch[:patch.shape[0], :patch.shape[1], ...] = patch + patch = padding_patch + patch_info['height'] = patch.shape[0] + patch_info['width'] = patch.shape[1] + + cv2.imwrite( + osp.join(save_dir, patch_info['id'] + '.' + img_ext), patch) + patch_info['filename'] = patch_info['id'] + '.' 
+ img_ext + patch_infos.append(patch_info) + + bboxes_num = patch_info['ann']['bboxes'].shape[0] + outdir = os.path.join(anno_dir, patch_info['id'] + '.txt') + + with codecs.open(outdir, 'w', 'utf-8') as f_out: + if bboxes_num == 0: + pass + else: + for idx in range(bboxes_num): + obj = patch_info['ann'] + outline = ' '.join(list(map(str, obj['bboxes'][idx]))) + diffs = str( + obj['diffs'][idx]) if not obj['trunc'][idx] else '2' + outline = outline + ' ' + obj['labels'][idx] + ' ' + diffs + f_out.write(outline + '\n') + + return patch_infos + + +def shift_qbboxes(bboxes, offset: Sequence[float]): + """Map bboxes from window coordinate back to original coordinate. TODO + Refactor and move to `mmyolo/utils/large_image.py` + + Args: + bboxes (np.array): quadrilateral boxes with window coordinate. + offset (Sequence[float]): The translation offsets with shape of (2, ). + + Returns: + np.array: bboxes with original coordinate. + """ + dim = bboxes.shape[-1] + translated = bboxes + np.array(offset * int(dim / 2), dtype=np.float32) + return translated + + +def single_split(info, patch_settings, min_img_ratio, iof_thr, padding, + padding_value, save_dir, anno_dir, img_ext, lock, prog, + total): + """Single image split. TODO Refactoring to make it more generic. + + Args: + info (dict): Image info and annotations. + patch_settings (list): List of patch settings, + each in format (patch_size, patch_overlap). + min_img_ratio (float): Threshold of window area divided by image area. + iof_thr (float): Threshold of overlaps between bbox and window. + padding (bool): If True, with padding. + padding_value (tuple[int|float]): Padding value. + save_dir (str): Save filename. + anno_dir (str): Annotation filename. + img_ext (str): Picture suffix. + lock (Lock): Lock of Manager. + prog (object): Progress of Manager. + total (int): Length of infos. + + Returns: + list[dict]: Information of paths. 
+ """ + img_ext = img_ext if img_ext is not None else info['filename'].split( + '.')[-1] + windows = get_sliding_window(info, patch_settings, min_img_ratio) + window_anns = get_window_annotation(info, windows, iof_thr) + patch_infos = crop_and_save_img(info, windows, window_anns, padding, + padding_value, save_dir, anno_dir, img_ext) + assert patch_infos + + lock.acquire() + prog.value += 1 + msg = f'({prog.value / total:3.1%} {prog.value}:{total})' + msg += ' - ' + f"Filename: {info['filename']}" + msg += ' - ' + f"width: {info['width']:<5d}" + msg += ' - ' + f"height: {info['height']:<5d}" + msg += ' - ' + f"Objects: {len(info['annotations']['bboxes']):<5d}" + msg += ' - ' + f'Patches: {len(patch_infos)}' + print_log(msg, 'current') + lock.release() + + return patch_infos + + +def main(): + args, split_cfg = parse_args() + + mkdir_or_exist(args.out_dir) + + # init logger + log_file_name = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '.log' + logger: MMLogger = MMLogger.get_instance( + 'mmyolo', + log_file=osp.join(args.out_dir, log_file_name), + log_level='INFO') + + # print configs + arg_str = '' + for arg in args._get_kwargs(): + arg_str += arg[0] + ' = ' + str(arg[1]) + '\n' + + logger.info('Base Settings:\n' + arg_str) + logger.info('Split Settings:\n' + split_cfg.pretty_text) + + # make dirs + _make_dirs(args.out_dir, args.phase, args.overwrite) + + # Load original dota data + required_sets = [] + for p in args.phase: + required_sets.extend(PHASE_REQUIRE_SETS[p]) + required_sets = set(required_sets) + + loaded_data_set = dict() + for req_set in required_sets: + logger.info(f'Starting loading DOTA {req_set} set information.') + start_time = time.time() + + infos = load_original_annotations( + data_root=args.data_root, + ann_subdir=args.ann_subdir, + phase=req_set) + + end_time = time.time() + result_log = f'Finishing loading {req_set} set, ' + result_log += f'get {len(infos)} images, ' + result_log += f'using {end_time - start_time:.3f}s.' + logger.info(result_log) + + loaded_data_set[req_set] = infos + + # Preprocess patch settings + patch_settings = [] + for ratio in split_cfg.img_resize_ratio: + for size, gap in zip(split_cfg.patch_sizes, + split_cfg.patch_overlap_sizes): + size_gap = (int(size / ratio), int(gap / ratio)) + if size_gap not in patch_settings: + patch_settings.append(size_gap) + + # Split data + for p in args.phase: + save_imgs_dir = osp.join(args.out_dir, p, 'images') + save_anns_dir = osp.join(args.out_dir, p, 'annfiles') + + logger.info(f'Start splitting {p} set images!') + start = time.time() + manager = Manager() + + data_infos = [] + for req_set in PHASE_REQUIRE_SETS[p]: + data_infos.extend(loaded_data_set[req_set]) + + worker = partial( + single_split, + patch_settings=patch_settings, + min_img_ratio=split_cfg.min_img_ratio, + iof_thr=split_cfg.iof_thr, + padding=split_cfg.padding, + padding_value=split_cfg.padding_value, + save_dir=save_imgs_dir, + anno_dir=save_anns_dir, + img_ext=args.save_ext, + lock=manager.Lock(), + prog=manager.Value('i', 0), + total=len(data_infos)) + + if args.nproc > 1: + pool = Pool(args.nproc) + patch_infos = pool.map(worker, data_infos) + pool.close() + else: + patch_infos = list(map(worker, data_infos)) + + patch_infos = reduce(lambda x, y: x + y, patch_infos) + stop = time.time() + logger.info( + f'Finish splitting {p} set images in {int(stop - start)} second!!!' 
+ ) + logger.info(f'Total images number: {len(patch_infos)}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/multi_scale.json b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/multi_scale.json new file mode 100644 index 0000000000000000000000000000000000000000..8cbdc93a4420abec7298f188a01ee71f38b94eb8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/multi_scale.json @@ -0,0 +1,19 @@ +{ + "patch_sizes": [ + 1024 + ], + "patch_overlap_sizes": [ + 500 + ], + "img_resize_ratio": [ + 0.5, 1.0, 1.5 + ], + "min_img_ratio": 0.6, + "iof_thr": 0.7, + "padding": true, + "padding_value": [ + 104, + 116, + 124 + ] +} diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/single_scale.json b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/single_scale.json new file mode 100644 index 0000000000000000000000000000000000000000..8c65c40ad63d522b3ab82956f6a7befdef874818 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/dota/split_config/single_scale.json @@ -0,0 +1,19 @@ +{ + "patch_sizes": [ + 1024 + ], + "patch_overlap_sizes": [ + 200 + ], + "img_resize_ratio": [ + 1.0 + ], + "min_img_ratio": 0.6, + "iof_thr": 0.7, + "padding": true, + "padding_value": [ + 104, + 116, + 124 + ] +} diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/labelme2coco.py b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/labelme2coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e68b935db3236177d4c17973ef2a43159150ffc7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/labelme2coco.py @@ -0,0 +1,325 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""This script helps to convert labelme-style dataset to the coco format. + +Usage: + $ python labelme2coco.py \ + --img-dir /path/to/images \ + --labels-dir /path/to/labels \ + --out /path/to/coco_instances.json \ + [--class-id-txt /path/to/class_with_id.txt] + +Note: + Labels dir file structure: + . + └── PATH_TO_LABELS + ├── image1.json + ├── image2.json + └── ... + + Images dir file structure: + . + └── PATH_TO_IMAGES + ├── image1.jpg + ├── image2.png + └── ... + + If user set `--class-id-txt` then will use it in `categories` field, + if not set, then will generate auto base on the all labelme label + files to `class_with_id.json`. + + class_with_id.txt example, each line is "id class_name": + ```text + 1 cat + 2 dog + 3 bicycle + 4 motorcycle + + ``` +""" +import argparse +import json +from pathlib import Path +from typing import Optional + +import numpy as np +from mmengine import track_iter_progress + +from mmyolo.utils.misc import IMG_EXTENSIONS + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--img-dir', type=str, help='Dataset image directory') + parser.add_argument( + '--labels-dir', type=str, help='Dataset labels directory') + parser.add_argument('--out', type=str, help='COCO label json output path') + parser.add_argument( + '--class-id-txt', default=None, type=str, help='All class id txt path') + args = parser.parse_args() + return args + + +def format_coco_annotations(points: list, image_id: int, annotations_id: int, + category_id: int) -> dict: + """Gen COCO annotations format label from labelme format label. 
+ + Args: + points (list): Coordinates of four vertices of rectangle bbox. + image_id (int): Image id. + annotations_id (int): Annotations id. + category_id (int): Image dir path. + + Return: + annotation_info (dict): COCO annotation data. + """ + annotation_info = dict() + annotation_info['iscrowd'] = 0 + annotation_info['category_id'] = category_id + annotation_info['id'] = annotations_id + annotation_info['image_id'] = image_id + + # bbox is [x1, y1, w, h] + annotation_info['bbox'] = [ + points[0][0], points[0][1], points[1][0] - points[0][0], + points[1][1] - points[0][1] + ] + + annotation_info['area'] = annotation_info['bbox'][2] * annotation_info[ + 'bbox'][3] # bbox w * h + segmentation_points = np.asarray(points).copy() + segmentation_points[1, :] = np.asarray(points)[2, :] + segmentation_points[2, :] = np.asarray(points)[1, :] + annotation_info['segmentation'] = [list(segmentation_points.flatten())] + + return annotation_info + + +def parse_labelme_to_coco( + image_dir: str, + labels_root: str, + all_classes_id: Optional[dict] = None) -> (dict, dict): + """Gen COCO json format label from labelme format label. + + Args: + image_dir (str): Image dir path. + labels_root (str): Image label root path. + all_classes_id (Optional[dict]): All class with id. Default None. + + Return: + coco_json (dict): COCO json data. + category_to_id (dict): category id and name. + + COCO json example: + + { + "images": [ + { + "height": 3000, + "width": 4000, + "id": 1, + "file_name": "IMG_20210627_225110.jpg" + }, + ... + ], + "categories": [ + { + "id": 1, + "name": "cat" + }, + ... + ], + "annotations": [ + { + "iscrowd": 0, + "category_id": 1, + "id": 1, + "image_id": 1, + "bbox": [ + 1183.7313232421875, + 1230.0509033203125, + 1270.9998779296875, + 927.0848388671875 + ], + "area": 1178324.7170306593, + "segmentation": [ + [ + 1183.7313232421875, + 1230.0509033203125, + 1183.7313232421875, + 2157.1357421875, + 2454.731201171875, + 2157.1357421875, + 2454.731201171875, + 1230.0509033203125 + ] + ] + }, + ... 
+ ] + } + """ + + # init coco json field + coco_json = {'images': [], 'categories': [], 'annotations': []} + + image_id = 0 + annotations_id = 0 + if all_classes_id is None: + category_to_id = dict() + categories_labels = [] + else: + category_to_id = all_classes_id + categories_labels = list(all_classes_id.keys()) + + # add class_ids and class_names to the categories list in coco_json + for class_name, class_id in category_to_id.items(): + coco_json['categories'].append({ + 'id': class_id, + 'name': class_name + }) + + # filter incorrect image file + img_file_list = [ + img_file for img_file in Path(image_dir).iterdir() + if img_file.suffix.lower() in IMG_EXTENSIONS + ] + + for img_file in track_iter_progress(img_file_list): + + # get label file according to the image file name + label_path = Path(labels_root).joinpath( + img_file.stem).with_suffix('.json') + if not label_path.exists(): + print(f'Can not find label file: {label_path}, skip...') + continue + + # load labelme label + with open(label_path, encoding='utf-8') as f: + labelme_data = json.load(f) + + image_id = image_id + 1 # coco id begin from 1 + + # update coco 'images' field + coco_json['images'].append({ + 'height': + labelme_data['imageHeight'], + 'width': + labelme_data['imageWidth'], + 'id': + image_id, + 'file_name': + Path(labelme_data['imagePath']).name + }) + + for label_shapes in labelme_data['shapes']: + + # Update coco 'categories' field + class_name = label_shapes['label'] + + if (all_classes_id is None) and (class_name + not in categories_labels): + # only update when not been added before + coco_json['categories'].append({ + 'id': + len(categories_labels) + 1, # categories id start with 1 + 'name': class_name + }) + categories_labels.append(class_name) + category_to_id[class_name] = len(categories_labels) + + elif (all_classes_id is not None) and (class_name + not in categories_labels): + # check class name + raise ValueError(f'Got unexpected class name {class_name}, ' + 'which is not in your `--class-id-txt`.') + + # get shape type and convert it to coco format + shape_type = label_shapes['shape_type'] + if shape_type != 'rectangle': + print(f'not support `{shape_type}` yet, skip...') + continue + + annotations_id = annotations_id + 1 + # convert point from [xmin, ymin, xmax, ymax] to [x1, y1, w, h] + (x1, y1), (x2, y2) = label_shapes['points'] + x1, x2 = sorted([x1, x2]) # xmin, xmax + y1, y2 = sorted([y1, y2]) # ymin, ymax + points = [[x1, y1], [x2, y2], [x1, y2], [x2, y1]] + coco_annotations = format_coco_annotations( + points, image_id, annotations_id, category_to_id[class_name]) + coco_json['annotations'].append(coco_annotations) + + print(f'Total image = {image_id}') + print(f'Total annotations = {annotations_id}') + print(f'Number of categories = {len(categories_labels)}, ' + f'which is {categories_labels}') + + return coco_json, category_to_id + + +def convert_labelme_to_coco(image_dir: str, + labels_dir: str, + out_path: str, + class_id_txt: Optional[str] = None): + """Convert labelme format label to COCO json format label. + + Args: + image_dir (str): Image dir path. + labels_dir (str): Image label path. + out_path (str): COCO json file save path. + class_id_txt (Optional[str]): All class id txt file path. + Default None. 
+ """ + assert Path(out_path).suffix == '.json' + + if class_id_txt is not None: + assert Path(class_id_txt).suffix == '.txt' + + all_classes_id = dict() + with open(class_id_txt, encoding='utf-8') as f: + txt_lines = f.read().splitlines() + assert len(txt_lines) > 0 + + for txt_line in txt_lines: + class_info = txt_line.split(' ') + if len(class_info) != 2: + raise ValueError('Error parse "class_id_txt" file ' + f'{class_id_txt}, please check if some of ' + 'the class names is blank, like "1 " -> ' + '"1 blank", or class name has space between' + ' words, like "1 Big house" -> "1 ' + 'Big-house".') + v, k = class_info + all_classes_id.update({k: int(v)}) + else: + all_classes_id = None + + # convert to coco json + coco_json_data, category_to_id = parse_labelme_to_coco( + image_dir, labels_dir, all_classes_id) + + # save json result + Path(out_path).parent.mkdir(exist_ok=True, parents=True) + print(f'Saving json to {out_path}') + json.dump(coco_json_data, open(out_path, 'w'), indent=2) + + if class_id_txt is None: + category_to_id_path = Path(out_path).with_name('class_with_id.txt') + print(f'Saving class id txt to {category_to_id_path}') + with open(category_to_id_path, 'w', encoding='utf-8') as f: + for k, v in category_to_id.items(): + f.write(f'{v} {k}\n') + else: + print('Not Saving new class id txt, user should using ' + f'{class_id_txt} for training config') + + +def main(): + args = parse_args() + convert_labelme_to_coco(args.img_dir, args.labels_dir, args.out, + args.class_id_txt) + print('All done!') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/yolo2coco.py b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/yolo2coco.py new file mode 100644 index 0000000000000000000000000000000000000000..19f1366622a3305f001e6e6650ad31f98c54b7c7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dataset_converters/yolo2coco.py @@ -0,0 +1,264 @@ +"""This script helps to convert yolo-style dataset to the coco format. + +Usage: + $ python yolo2coco.py /path/to/dataset # image_dir + +Note: + 1. Before running this script, please make sure the root directory + of your dataset is formatted in the following struction: + . + └── $ROOT_PATH + ├── classes.txt + ├── labels + │ ├── a.txt + │ ├── b.txt + │ └── ... + ├── images + │ ├── a.jpg + │ ├── b.png + │ └── ... + └── ... + 2. The script will automatically check whether the corresponding + `train.txt`, ` val.txt`, and `test.txt` exist under your `image_dir` + or not. If these files are detected, the script will organize the + dataset. The image paths in these files must be ABSOLUTE paths. + 3. Once the script finishes, the result files will be saved in the + directory named 'annotations' in the root directory of your dataset. + The default output file is result.json. The root directory folder may + look like this in the root directory after the converting: + . + └── $ROOT_PATH + ├── annotations + │ ├── result.json + │ └── ... + ├── classes.txt + ├── labels + │ ├── a.txt + │ ├── b.txt + │ └── ... + ├── images + │ ├── a.jpg + │ ├── b.png + │ └── ... + └── ... + 4. After converting to coco, you can use the + `tools/analysis_tools/browse_coco_json.py` script to visualize + whether it is correct. 
+""" +import argparse +import os +import os.path as osp + +import mmcv +import mmengine + +IMG_EXTENSIONS = ('.jpg', '.png', '.jpeg') + + +def check_existence(file_path: str): + """Check if target file is existed.""" + if not osp.exists(file_path): + raise FileNotFoundError(f'{file_path} does not exist!') + + +def get_image_info(yolo_image_dir, idx, file_name): + """Retrieve image information.""" + img_path = osp.join(yolo_image_dir, file_name) + check_existence(img_path) + + img = mmcv.imread(img_path) + height, width = img.shape[:2] + img_info_dict = { + 'file_name': file_name, + 'id': idx, + 'width': width, + 'height': height + } + return img_info_dict, height, width + + +def convert_bbox_info(label, idx, obj_count, image_height, image_width): + """Convert yolo-style bbox info to the coco format.""" + label = label.strip().split() + x = float(label[1]) + y = float(label[2]) + w = float(label[3]) + h = float(label[4]) + + # convert x,y,w,h to x1,y1,x2,y2 + x1 = (x - w / 2) * image_width + y1 = (y - h / 2) * image_height + x2 = (x + w / 2) * image_width + y2 = (y + h / 2) * image_height + + cls_id = int(label[0]) + width = max(0., x2 - x1) + height = max(0., y2 - y1) + coco_format_info = { + 'image_id': idx, + 'id': obj_count, + 'category_id': cls_id, + 'bbox': [x1, y1, width, height], + 'area': width * height, + 'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]], + 'iscrowd': 0 + } + obj_count += 1 + return coco_format_info, obj_count + + +def organize_by_existing_files(image_dir: str, existed_categories: list): + """Format annotations by existing train/val/test files.""" + categories = ['train', 'val', 'test'] + image_list = [] + + for cat in categories: + if cat in existed_categories: + txt_file = osp.join(image_dir, f'{cat}.txt') + print(f'Start to read {cat} dataset definition') + assert osp.exists(txt_file) + + with open(txt_file) as f: + img_paths = f.readlines() + img_paths = [ + os.path.split(img_path.strip())[1] + for img_path in img_paths + ] # split the absolute path + image_list.append(img_paths) + else: + image_list.append([]) + return image_list[0], image_list[1], image_list[2] + + +def convert_yolo_to_coco(image_dir: str): + """Convert annotations from yolo style to coco style. 
+ + Args: + image_dir (str): the root directory of your datasets which contains + labels, images, classes.txt, etc + """ + print(f'Start to load existing images and annotations from {image_dir}') + check_existence(image_dir) + + # check local environment + yolo_label_dir = osp.join(image_dir, 'labels') + yolo_image_dir = osp.join(image_dir, 'images') + yolo_class_txt = osp.join(image_dir, 'classes.txt') + check_existence(yolo_label_dir) + check_existence(yolo_image_dir) + check_existence(yolo_class_txt) + print(f'All necessary files are located at {image_dir}') + + train_txt_path = osp.join(image_dir, 'train.txt') + val_txt_path = osp.join(image_dir, 'val.txt') + test_txt_path = osp.join(image_dir, 'test.txt') + existed_categories = [] + print(f'Checking if train.txt, val.txt, and test.txt are in {image_dir}') + if osp.exists(train_txt_path): + print('Found train.txt') + existed_categories.append('train') + if osp.exists(val_txt_path): + print('Found val.txt') + existed_categories.append('val') + if osp.exists(test_txt_path): + print('Found test.txt') + existed_categories.append('test') + + # prepare the output folders + output_folder = osp.join(image_dir, 'annotations') + if not osp.exists(output_folder): + os.makedirs(output_folder) + check_existence(output_folder) + + # start the convert procedure + with open(yolo_class_txt) as f: + classes = f.read().strip().split() + + indices = os.listdir(yolo_image_dir) + total = len(indices) + + dataset = {'images': [], 'annotations': [], 'categories': []} + if existed_categories == []: + print('These files are not located, no need to organize separately.') + for i, cls in enumerate(classes, 0): + dataset['categories'].append({'id': i, 'name': cls}) + else: + print('Need to organize the data accordingly.') + train_dataset = {'images': [], 'annotations': [], 'categories': []} + val_dataset = {'images': [], 'annotations': [], 'categories': []} + test_dataset = {'images': [], 'annotations': [], 'categories': []} + + # category id starts from 0 + for i, cls in enumerate(classes, 0): + train_dataset['categories'].append({'id': i, 'name': cls}) + val_dataset['categories'].append({'id': i, 'name': cls}) + test_dataset['categories'].append({'id': i, 'name': cls}) + train_img, val_img, test_img = organize_by_existing_files( + image_dir, existed_categories) + + obj_count = 0 + skipped = 0 + converted = 0 + for idx, image in enumerate(mmengine.track_iter_progress(indices)): + img_info_dict, image_height, image_width = get_image_info( + yolo_image_dir, idx, image) + + if existed_categories != []: + if image in train_img: + dataset = train_dataset + elif image in val_img: + dataset = val_dataset + elif image in test_img: + dataset = test_dataset + + dataset['images'].append(img_info_dict) + + img_name = osp.splitext(image)[0] + label_path = f'{osp.join(yolo_label_dir, img_name)}.txt' + if not osp.exists(label_path): + # if current image is not annotated or the annotation file failed + print( + f'WARNING: {label_path} does not exist. Please check the file.' 
+ ) + skipped += 1 + continue + + with open(label_path) as f: + labels = f.readlines() + for label in labels: + coco_info, obj_count = convert_bbox_info( + label, idx, obj_count, image_height, image_width) + dataset['annotations'].append(coco_info) + converted += 1 + + # saving results to result json + if existed_categories == []: + out_file = osp.join(image_dir, 'annotations/result.json') + print(f'Saving converted results to {out_file} ...') + mmengine.dump(dataset, out_file) + else: + for category in existed_categories: + out_file = osp.join(output_folder, f'{category}.json') + print(f'Saving converted results to {out_file} ...') + if category == 'train': + mmengine.dump(train_dataset, out_file) + elif category == 'val': + mmengine.dump(val_dataset, out_file) + elif category == 'test': + mmengine.dump(test_dataset, out_file) + + # simple statistics + print(f'Process finished! Please check at {output_folder} .') + print(f'Number of images found: {total}, converted: {converted},', + f'and skipped: {skipped}. Total annotation count: {obj_count}.') + print('You can use tools/analysis_tools/browse_coco_json.py to visualize!') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + 'image_dir', + type=str, + help='dataset directory with ./images and ./labels, classes.txt, etc.') + arg = parser.parse_args() + convert_yolo_to_coco(arg.image_dir) diff --git a/models/YOLO-World/third_party/mmyolo/tools/dist_test.sh b/models/YOLO-World/third_party/mmyolo/tools/dist_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..dea131b43ea8f1222661d20603d40c18ea7f28a1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/models/YOLO-World/third_party/mmyolo/tools/dist_train.sh b/models/YOLO-World/third_party/mmyolo/tools/dist_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..3fca7641dec4090930c85991a079c28409529d4e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/models/YOLO-World/third_party/mmyolo/tools/misc/coco_split.py b/models/YOLO-World/third_party/mmyolo/tools/misc/coco_split.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce70349b6e85f48704e6ef5c8e5c0164bc6084e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/misc/coco_split.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
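+# Usage sketch (the json/out-dir paths below are placeholders): split one coco
+# annotation file into sub-set jsons by ratio. Passing three ratios writes
+# train.json / val.json / test.json, while two ratios write trainval.json and
+# test.json (see `split_coco_dataset` below).
+#
+#   python tools/misc/coco_split.py \
+#       --json data/coco/annotations/instances_all.json \
+#       --out-dir data/coco/annotations/splits \
+#       --ratios 0.8 0.1 0.1 \
+#       --shuffle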
+import argparse +import json +import random +from pathlib import Path + +import numpy as np +from pycocotools.coco import COCO + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--json', type=str, required=True, help='COCO json label path') + parser.add_argument( + '--out-dir', type=str, required=True, help='output path') + parser.add_argument( + '--ratios', + nargs='+', + type=float, + help='ratio for sub dataset, if set 2 number then will generate ' + 'trainval + test (eg. "0.8 0.1 0.1" or "2 1 1"), if set 3 number ' + 'then will generate train + val + test (eg. "0.85 0.15" or "2 1")') + parser.add_argument( + '--shuffle', + action='store_true', + help='Whether to display in disorder') + parser.add_argument('--seed', default=-1, type=int, help='seed') + args = parser.parse_args() + return args + + +def split_coco_dataset(coco_json_path: str, save_dir: str, ratios: list, + shuffle: bool, seed: int): + if not Path(coco_json_path).exists(): + raise FileNotFoundError(f'Can not not found {coco_json_path}') + + if not Path(save_dir).exists(): + Path(save_dir).mkdir(parents=True) + + # ratio normalize + ratios = np.array(ratios) / np.array(ratios).sum() + + if len(ratios) == 2: + ratio_train, ratio_test = ratios + ratio_val = 0 + train_type = 'trainval' + elif len(ratios) == 3: + ratio_train, ratio_val, ratio_test = ratios + train_type = 'train' + else: + raise ValueError('ratios must set 2 or 3 group!') + + # Read coco info + coco = COCO(coco_json_path) + coco_image_ids = coco.getImgIds() + + # gen image number of each dataset + val_image_num = int(len(coco_image_ids) * ratio_val) + test_image_num = int(len(coco_image_ids) * ratio_test) + train_image_num = len(coco_image_ids) - val_image_num - test_image_num + print('Split info: ====== \n' + f'Train ratio = {ratio_train}, number = {train_image_num}\n' + f'Val ratio = {ratio_val}, number = {val_image_num}\n' + f'Test ratio = {ratio_test}, number = {test_image_num}') + + seed = int(seed) + if seed != -1: + print(f'Set the global seed: {seed}') + np.random.seed(seed) + + if shuffle: + print('shuffle dataset.') + random.shuffle(coco_image_ids) + + # split each dataset + train_image_ids = coco_image_ids[:train_image_num] + if val_image_num != 0: + val_image_ids = coco_image_ids[train_image_num:train_image_num + + val_image_num] + else: + val_image_ids = None + test_image_ids = coco_image_ids[train_image_num + val_image_num:] + + # Save new json + categories = coco.loadCats(coco.getCatIds()) + for img_id_list in [train_image_ids, val_image_ids, test_image_ids]: + if img_id_list is None: + continue + + # Gen new json + img_dict = { + 'images': coco.loadImgs(ids=img_id_list), + 'categories': categories, + 'annotations': coco.loadAnns(coco.getAnnIds(imgIds=img_id_list)) + } + + # save json + if img_id_list == train_image_ids: + json_file_path = Path(save_dir, f'{train_type}.json') + elif img_id_list == val_image_ids: + json_file_path = Path(save_dir, 'val.json') + elif img_id_list == test_image_ids: + json_file_path = Path(save_dir, 'test.json') + else: + raise ValueError('img_id_list ERROR!') + + print(f'Saving json to {json_file_path}') + with open(json_file_path, 'w') as f_json: + json.dump(img_dict, f_json, ensure_ascii=False, indent=2) + + print('All done!') + + +def main(): + args = parse_args() + split_coco_dataset(args.json, args.out_dir, args.ratios, args.shuffle, + args.seed) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/misc/download_dataset.py 
b/models/YOLO-World/third_party/mmyolo/tools/misc/download_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7d1c64d82ec21285c348afc65a102d49452f2d4a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/misc/download_dataset.py @@ -0,0 +1,112 @@ +import argparse +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from tarfile import TarFile +from zipfile import ZipFile + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Download datasets for training') + parser.add_argument( + '--dataset-name', type=str, help='dataset name', default='coco2017') + parser.add_argument( + '--save-dir', + type=str, + help='the dir to save dataset', + default='data/coco') + parser.add_argument( + '--unzip', + action='store_true', + help='whether unzip dataset or not, zipped files will be saved') + parser.add_argument( + '--delete', + action='store_true', + help='delete the download zipped files') + parser.add_argument( + '--threads', type=int, help='number of threading', default=4) + args = parser.parse_args() + return args + + +def download(url, dir, unzip=True, delete=False, threads=1): + + def download_one(url, dir): + f = dir / Path(url).name + if Path(url).is_file(): + Path(url).rename(f) + elif not f.exists(): + print(f'Downloading {url} to {f}') + torch.hub.download_url_to_file(url, f, progress=True) + if unzip and f.suffix in ('.zip', '.tar'): + print(f'Unzipping {f.name}') + if f.suffix == '.zip': + ZipFile(f).extractall(path=dir) + elif f.suffix == '.tar': + TarFile(f).extractall(path=dir) + if delete: + f.unlink() + print(f'Delete {f}') + + dir = Path(dir) + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def main(): + args = parse_args() + path = Path(args.save_dir) + if not path.exists(): + path.mkdir(parents=True, exist_ok=True) + data2url = dict( + # TODO: Support for downloading Panoptic Segmentation of COCO + coco2017=[ + 'http://images.cocodataset.org/zips/train2017.zip', + 'http://images.cocodataset.org/zips/val2017.zip', + 'http://images.cocodataset.org/zips/test2017.zip', + 'http://images.cocodataset.org/annotations/' + + 'annotations_trainval2017.zip' + ], + lvis=[ + 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa + 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa + ], + voc2007=[ + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa + ], + voc2012=[ + 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', # noqa + ], + balloon=[ + # src link: https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip # noqa + 'https://download.openmmlab.com/mmyolo/data/balloon_dataset.zip' + ], + cat=[ + 'https://download.openmmlab.com/mmyolo/data/cat_dataset.zip' # noqa + ], + ) + url = data2url.get(args.dataset_name, None) + if url is None: + print('Only support COCO, VOC, balloon, cat and LVIS now!') + return + download( + url, + dir=path, + unzip=args.unzip, + delete=args.delete, + threads=args.threads) + + +if __name__ == '__main__': + main() diff --git 
a/models/YOLO-World/third_party/mmyolo/tools/misc/extract_subcoco.py b/models/YOLO-World/third_party/mmyolo/tools/misc/extract_subcoco.py new file mode 100644 index 0000000000000000000000000000000000000000..31528e0b338bf26bdf5abbca0e2254413e87e186 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/misc/extract_subcoco.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Extracting subsets from coco2017 dataset. + +This script is mainly used to debug and verify the correctness of the +program quickly. +The root folder format must be in the following format: + +├── root +│ ├── annotations +│ ├── train2017 +│ ├── val2017 +│ ├── test2017 + +Currently, only support COCO2017. In the future will support user-defined +datasets of standard coco JSON format. + +Example: + python tools/misc/extract_subcoco.py ${ROOT} ${OUT_DIR} --num-img ${NUM_IMG} +""" + +import argparse +import os.path as osp +import shutil + +import mmengine +import numpy as np +from pycocotools.coco import COCO + + +# TODO: Currently only supports coco2017 +def _process_data(args, + in_dataset_type: str, + out_dataset_type: str, + year: str = '2017'): + assert in_dataset_type in ('train', 'val') + assert out_dataset_type in ('train', 'val') + + int_ann_file_name = f'annotations/instances_{in_dataset_type}{year}.json' + out_ann_file_name = f'annotations/instances_{out_dataset_type}{year}.json' + + ann_path = osp.join(args.root, int_ann_file_name) + json_data = mmengine.load(ann_path) + + new_json_data = { + 'info': json_data['info'], + 'licenses': json_data['licenses'], + 'categories': json_data['categories'], + 'images': [], + 'annotations': [] + } + + area_dict = { + 'small': [0., 32 * 32], + 'medium': [32 * 32, 96 * 96], + 'large': [96 * 96, float('inf')] + } + + coco = COCO(ann_path) + + # filter annotations by category ids and area range + areaRng = area_dict[args.area_size] if args.area_size else [] + catIds = coco.getCatIds(args.classes) if args.classes else [] + ann_ids = coco.getAnnIds(catIds=catIds, areaRng=areaRng) + ann_info = coco.loadAnns(ann_ids) + + # get image ids by anns set + filter_img_ids = {ann['image_id'] for ann in ann_info} + filter_img = coco.loadImgs(filter_img_ids) + + # shuffle + np.random.shuffle(filter_img) + + num_img = args.num_img if args.num_img > 0 else len(filter_img) + if num_img > len(filter_img): + print( + f'num_img is too big, will be set to {len(filter_img)}, ' + 'because of not enough image after filter by classes and area_size' + ) + num_img = len(filter_img) + + progress_bar = mmengine.ProgressBar(num_img) + + for i in range(num_img): + file_name = filter_img[i]['file_name'] + image_path = osp.join(args.root, in_dataset_type + year, file_name) + + ann_ids = coco.getAnnIds( + imgIds=[filter_img[i]['id']], catIds=catIds, areaRng=areaRng) + img_ann_info = coco.loadAnns(ann_ids) + + new_json_data['images'].append(filter_img[i]) + new_json_data['annotations'].extend(img_ann_info) + + shutil.copy(image_path, osp.join(args.out_dir, + out_dataset_type + year)) + + progress_bar.update() + + mmengine.dump(new_json_data, osp.join(args.out_dir, out_ann_file_name)) + + +def _make_dirs(out_dir): + mmengine.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(osp.join(out_dir, 'annotations')) + mmengine.mkdir_or_exist(osp.join(out_dir, 'train2017')) + mmengine.mkdir_or_exist(osp.join(out_dir, 'val2017')) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Extract coco subset') + parser.add_argument('root', help='root path') + parser.add_argument( + 
'out_dir', type=str, help='directory where subset coco will be saved.') + parser.add_argument( + '--num-img', + default=50, + type=int, + help='num of extract image, -1 means all images') + parser.add_argument( + '--area-size', + choices=['small', 'medium', 'large'], + help='filter ground-truth info by area size') + parser.add_argument( + '--classes', nargs='+', help='filter ground-truth by class name') + parser.add_argument( + '--use-training-set', + action='store_true', + help='Whether to use the training set when extract the training set. ' + 'The training subset is extracted from the validation set by ' + 'default which can speed up.') + parser.add_argument('--seed', default=-1, type=int, help='seed') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + assert args.out_dir != args.root, \ + 'The file will be overwritten in place, ' \ + 'so the same folder is not allowed !' + + seed = int(args.seed) + if seed != -1: + print(f'Set the global seed: {seed}') + np.random.seed(int(args.seed)) + + _make_dirs(args.out_dir) + + print('====Start processing train dataset====') + if args.use_training_set: + _process_data(args, 'train', 'train') + else: + _process_data(args, 'val', 'train') + print('\n====Start processing val dataset====') + _process_data(args, 'val', 'val') + print(f'\n Result save to {args.out_dir}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/misc/print_config.py b/models/YOLO-World/third_party/mmyolo/tools/misc/print_config.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2efe33d5f388638d8b9c7b21f8a2eab12bd28e --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/misc/print_config.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os + +from mmdet.utils import replace_cfg_vals, update_data_root +from mmengine import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--save-path', + default=None, + help='save path of whole config, suffixed with .py, .json or .yml') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + # replace the ${key} with the value of cfg.key + cfg = replace_cfg_vals(cfg) + + # update data root according to MMDET_DATASETS + update_data_root(cfg) + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + print(f'Config:\n{cfg.pretty_text}') + + if args.save_path is not None: + save_path = args.save_path + + suffix = os.path.splitext(save_path)[-1] + assert suffix in ['.py', '.json', '.yml'] + + if not os.path.exists(os.path.split(save_path)[0]): + os.makedirs(os.path.split(save_path)[0]) + cfg.dump(save_path) + print(f'Config saving at {save_path}') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/misc/publish_model.py b/models/YOLO-World/third_party/mmyolo/tools/misc/publish_model.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ccbf080a4b162fe05d542409eec7d3b6441118 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/misc/publish_model.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import subprocess + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + if 'message_hub' in checkpoint: + del checkpoint['message_hub'] + if 'ema_state_dict' in checkpoint: + del checkpoint['ema_state_dict'] + + for key in list(checkpoint['state_dict']): + if key.startswith('data_preprocessor'): + checkpoint['state_dict'].pop(key) + elif 'priors_base_sizes' in key: + checkpoint['state_dict'].pop(key) + elif 'grid_offset' in key: + checkpoint['state_dict'].pop(key) + elif 'prior_inds' in key: + checkpoint['state_dict'].pop(key) + + if torch.__version__ >= '1.6': + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + final_file = out_file_name + f'-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/convert_kd_ckpt_to_student.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/convert_kd_ckpt_to_student.py new file mode 100644 index 0000000000000000000000000000000000000000..d2f787e47584d3edbed2269760832670530c146b --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/convert_kd_ckpt_to_student.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import os.path as osp +from pathlib import Path + +from mmengine.runner import CheckpointLoader, save_checkpoint +from mmengine.utils import mkdir_or_exist + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert KD checkpoint to student-only checkpoint') + parser.add_argument('checkpoint', help='input checkpoint filename') + parser.add_argument('--out-path', help='save checkpoint path') + parser.add_argument( + '--inplace', action='store_true', help='replace origin ckpt') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + checkpoint = CheckpointLoader.load_checkpoint( + args.checkpoint, map_location='cpu') + new_state_dict = dict() + new_meta = checkpoint['meta'] + + for key, value in checkpoint['state_dict'].items(): + if key.startswith('architecture.'): + new_key = key.replace('architecture.', '') + new_state_dict[new_key] = value + + checkpoint = dict() + checkpoint['meta'] = new_meta + checkpoint['state_dict'] = new_state_dict + + if args.inplace: + assert osp.exists(args.checkpoint), \ + 'can not find the checkpoint path: {args.checkpoint}' + save_checkpoint(checkpoint, args.checkpoint) + else: + ckpt_path = Path(args.checkpoint) + ckpt_name = ckpt_path.stem + if args.out_path: + ckpt_dir = Path(args.out_path) + else: + ckpt_dir = ckpt_path.parent + mkdir_or_exist(ckpt_dir) + new_ckpt_path = osp.join(ckpt_dir, f'{ckpt_name}_student.pth') + save_checkpoint(checkpoint, new_ckpt_path) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/ppyoloe_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/ppyoloe_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..75c4af6963a8c58834507dd823930b1f9fcab6ac --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/ppyoloe_to_mmyolo.py @@ -0,0 +1,184 @@ +import argparse +import pickle +from collections import OrderedDict + +import torch + + +def convert_bn(k: str): + name = k.replace('._mean', + '.running_mean').replace('._variance', '.running_var') + return name + + +def convert_repvgg(k: str): + if '.conv2.conv1.' in k: + name = k.replace('.conv2.conv1.', '.conv2.rbr_dense.') + return name + elif '.conv2.conv2.' in k: + name = k.replace('.conv2.conv2.', '.conv2.rbr_1x1.') + return name + else: + return k + + +def convert(src: str, dst: str, imagenet_pretrain: bool = False): + with open(src, 'rb') as f: + model = pickle.load(f) + + new_state_dict = OrderedDict() + if imagenet_pretrain: + for k, v in model.items(): + if '@@' in k: + continue + if 'stem.' in k: + # backbone.stem.conv1.conv.weight + # -> backbone.stem.0.conv.weight + org_ind = k.split('.')[1][-1] + new_ind = str(int(org_ind) - 1) + name = k.replace('stem.conv%s.' % org_ind, + 'stem.%s.' % new_ind) + else: + # backbone.stages.1.conv2.bn._variance + # -> backbone.stage2.0.conv2.bn.running_var + org_stage_ind = k.split('.')[1] + new_stage_ind = str(int(org_stage_ind) + 1) + name = k.replace('stages.%s.' % org_stage_ind, + 'stage%s.0.' % new_stage_ind) + name = convert_repvgg(name) + if '.attn.' in k: + name = name.replace('.attn.fc.', '.attn.fc.conv.') + name = convert_bn(name) + name = 'backbone.' + name + + new_state_dict[name] = torch.from_numpy(v) + else: + for k, v in model.items(): + name = k + if k.startswith('backbone.'): + if '.stem.' 
in k: + # backbone.stem.conv1.conv.weight + # -> backbone.stem.0.conv.weight + org_ind = k.split('.')[2][-1] + new_ind = str(int(org_ind) - 1) + name = k.replace('.stem.conv%s.' % org_ind, + '.stem.%s.' % new_ind) + else: + # backbone.stages.1.conv2.bn._variance + # -> backbone.stage2.0.conv2.bn.running_var + org_stage_ind = k.split('.')[2] + new_stage_ind = str(int(org_stage_ind) + 1) + name = k.replace('.stages.%s.' % org_stage_ind, + '.stage%s.0.' % new_stage_ind) + name = convert_repvgg(name) + if '.attn.' in k: + name = name.replace('.attn.fc.', '.attn.fc.conv.') + name = convert_bn(name) + elif k.startswith('neck.'): + # fpn_stages + if k.startswith('neck.fpn_stages.'): + # neck.fpn_stages.0.0.conv1.conv.weight + # -> neck.reduce_layers.2.0.conv1.conv.weight + if k.startswith('neck.fpn_stages.0.0.'): + name = k.replace('neck.fpn_stages.0.0.', + 'neck.reduce_layers.2.0.') + if '.spp.' in name: + name = name.replace('.spp.conv.', '.spp.conv2.') + # neck.fpn_stages.1.0.conv1.conv.weight + # -> neck.top_down_layers.0.0.conv1.conv.weight + elif k.startswith('neck.fpn_stages.1.0.'): + name = k.replace('neck.fpn_stages.1.0.', + 'neck.top_down_layers.0.0.') + elif k.startswith('neck.fpn_stages.2.0.'): + name = k.replace('neck.fpn_stages.2.0.', + 'neck.top_down_layers.1.0.') + else: + raise NotImplementedError('Not implemented.') + name = name.replace('.0.convs.', '.0.blocks.') + elif k.startswith('neck.fpn_routes.'): + # neck.fpn_routes.0.conv.weight + # -> neck.upsample_layers.0.0.conv.weight + index = k.split('.')[2] + name = 'neck.upsample_layers.' + index + '.0.' + '.'.join( + k.split('.')[-2:]) + name = name.replace('.0.convs.', '.0.blocks.') + elif k.startswith('neck.pan_stages.'): + # neck.pan_stages.0.0.conv1.conv.weight + # -> neck.bottom_up_layers.1.0.conv1.conv.weight + ind = k.split('.')[2] + name = k.replace( + 'neck.pan_stages.' + ind, 'neck.bottom_up_layers.' + + ('0' if ind == '1' else '1')) + name = name.replace('.0.convs.', '.0.blocks.') + elif k.startswith('neck.pan_routes.'): + # neck.pan_routes.0.conv.weight + # -> neck.downsample_layers.0.conv.weight + ind = k.split('.')[2] + name = k.replace( + 'neck.pan_routes.' + ind, 'neck.downsample_layers.' + + ('0' if ind == '1' else '1')) + name = name.replace('.0.convs.', '.0.blocks.') + + else: + raise NotImplementedError('Not implement.') + name = convert_repvgg(name) + name = convert_bn(name) + elif k.startswith('yolo_head.'): + if ('anchor_points' in k) or ('stride_tensor' in k): + continue + if 'proj_conv' in k: + name = k.replace('yolo_head.proj_conv.', + 'bbox_head.head_module.proj_conv.') + else: + for org_key, rep_key in [ + [ + 'yolo_head.stem_cls.', + 'bbox_head.head_module.cls_stems.' + ], + [ + 'yolo_head.stem_reg.', + 'bbox_head.head_module.reg_stems.' + ], + [ + 'yolo_head.pred_cls.', + 'bbox_head.head_module.cls_preds.' + ], + [ + 'yolo_head.pred_reg.', + 'bbox_head.head_module.reg_preds.' 
+ ] + ]: + name = name.replace(org_key, rep_key) + name = name.split('.') + ind = name[3] + name[3] = str(2 - int(ind)) + name = '.'.join(name) + name = convert_bn(name) + else: + continue + + new_state_dict[name] = torch.from_numpy(v) + data = {'state_dict': new_state_dict} + torch.save(data, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', + default='ppyoloe_plus_crn_s_80e_coco.pdparams', + help='src ppyoloe model path') + parser.add_argument( + '--dst', default='mmppyoloe_plus_s.pt', help='save path') + parser.add_argument( + '--imagenet-pretrain', + action='store_true', + default=False, + help='Load model pretrained on imagenet dataset which only ' + 'have weight for backbone.') + args = parser.parse_args() + convert(args.src, args.dst, args.imagenet_pretrain) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/rtmdet_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/rtmdet_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6f237d44464fdfb8882c898f332ef51ba12ae8 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/rtmdet_to_mmyolo.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def convert(src, dst): + """Convert keys in pretrained RTMDet models to MMYOLO style.""" + blobs = torch.load(src)['state_dict'] + state_dict = OrderedDict() + + for key, weight in blobs.items(): + if 'neck.reduce_layers.0' in key: + new_key = key.replace('.0', '.2') + state_dict[new_key] = weight + elif 'neck.reduce_layers.1' in key: + new_key = key.replace('reduce_layers.1', 'top_down_layers.0.1') + state_dict[new_key] = weight + elif 'neck.top_down_blocks.0' in key: + new_key = key.replace('down_blocks', 'down_layers.0') + state_dict[new_key] = weight + elif 'neck.top_down_blocks.1' in key: + new_key = key.replace('down_blocks', 'down_layers') + state_dict[new_key] = weight + elif 'downsamples' in key: + new_key = key.replace('downsamples', 'downsample_layers') + state_dict[new_key] = weight + elif 'bottom_up_blocks' in key: + new_key = key.replace('bottom_up_blocks', 'bottom_up_layers') + state_dict[new_key] = weight + elif 'out_convs' in key: + new_key = key.replace('out_convs', 'out_layers') + state_dict[new_key] = weight + elif 'bbox_head' in key: + new_key = key.replace('bbox_head', 'bbox_head.head_module') + state_dict[new_key] = weight + elif 'data_preprocessor' in key: + continue + else: + new_key = key + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + checkpoint['meta'] = blobs.get('meta') + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src rtm model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..a4e62a2f7787444862990e35d1fb20c0be9f0961 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5_to_mmyolo.py @@ 
-0,0 +1,128 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + +convert_dict_p5 = { + 'model.0': 'backbone.stem', + 'model.1': 'backbone.stage1.0', + 'model.2': 'backbone.stage1.1', + 'model.3': 'backbone.stage2.0', + 'model.4': 'backbone.stage2.1', + 'model.5': 'backbone.stage3.0', + 'model.6': 'backbone.stage3.1', + 'model.7': 'backbone.stage4.0', + 'model.8': 'backbone.stage4.1', + 'model.9.cv1': 'backbone.stage4.2.conv1', + 'model.9.cv2': 'backbone.stage4.2.conv2', + 'model.10': 'neck.reduce_layers.2', + 'model.13': 'neck.top_down_layers.0.0', + 'model.14': 'neck.top_down_layers.0.1', + 'model.17': 'neck.top_down_layers.1', + 'model.18': 'neck.downsample_layers.0', + 'model.20': 'neck.bottom_up_layers.0', + 'model.21': 'neck.downsample_layers.1', + 'model.23': 'neck.bottom_up_layers.1', + 'model.24.m': 'bbox_head.head_module.convs_pred', + 'model.24.proto': 'bbox_head.head_module.proto_preds', +} + +convert_dict_p6 = { + 'model.0': 'backbone.stem', + 'model.1': 'backbone.stage1.0', + 'model.2': 'backbone.stage1.1', + 'model.3': 'backbone.stage2.0', + 'model.4': 'backbone.stage2.1', + 'model.5': 'backbone.stage3.0', + 'model.6': 'backbone.stage3.1', + 'model.7': 'backbone.stage4.0', + 'model.8': 'backbone.stage4.1', + 'model.9': 'backbone.stage5.0', + 'model.10': 'backbone.stage5.1', + 'model.11.cv1': 'backbone.stage5.2.conv1', + 'model.11.cv2': 'backbone.stage5.2.conv2', + 'model.12': 'neck.reduce_layers.3', + 'model.15': 'neck.top_down_layers.0.0', + 'model.16': 'neck.top_down_layers.0.1', + 'model.19': 'neck.top_down_layers.1.0', + 'model.20': 'neck.top_down_layers.1.1', + 'model.23': 'neck.top_down_layers.2', + 'model.24': 'neck.downsample_layers.0', + 'model.26': 'neck.bottom_up_layers.0', + 'model.27': 'neck.downsample_layers.1', + 'model.29': 'neck.bottom_up_layers.1', + 'model.30': 'neck.downsample_layers.2', + 'model.32': 'neck.bottom_up_layers.2', + 'model.33.m': 'bbox_head.head_module.convs_pred', + 'model.33.proto': 'bbox_head.head_module.proto_preds', +} + + +def convert(src, dst): + """Convert keys in pretrained YOLOv5 models to mmyolo style.""" + if src.endswith('6.pt'): + convert_dict = convert_dict_p6 + is_p6_model = True + print('Converting P6 model') + else: + convert_dict = convert_dict_p5 + is_p6_model = False + print('Converting P5 model') + try: + yolov5_model = torch.load(src)['model'] + blobs = yolov5_model.state_dict() + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the ultralytics/yolov5 repo,' + ' because loading the official pretrained model need' + ' `model.py` to build model.') + state_dict = OrderedDict() + + for key, weight in blobs.items(): + + num, module = key.split('.')[1:3] + if (is_p6_model and + (num == '11' or num == '33')) or (not is_p6_model and + (num == '9' or num == '24')): + if module == 'anchors': + continue + prefix = f'model.{num}.{module}' + else: + prefix = f'model.{num}' + + new_key = key.replace(prefix, convert_dict[prefix]) + + if '.m.' 
in new_key: + new_key = new_key.replace('.m.', '.blocks.') + new_key = new_key.replace('.cv', '.conv') + elif 'bbox_head.head_module.proto_preds.cv' in new_key: + new_key = new_key.replace( + 'bbox_head.head_module.proto_preds.cv', + 'bbox_head.head_module.proto_preds.conv') + else: + new_key = new_key.replace('.cv1', '.main_conv') + new_key = new_key.replace('.cv2', '.short_conv') + new_key = new_key.replace('.cv3', '.final_conv') + + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +# Note: This script must be placed under the yolov5 repo to run. +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolov5s.pt', help='src yolov5 model path') + parser.add_argument('--dst', default='mmyolov5s.pt', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5u_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5u_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..806c76cb47b17a3b0291f80e092e7b8d9856a0ab --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov5u_to_mmyolo.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + +convert_dict_p5 = { + 'model.0': 'backbone.stem', + 'model.1': 'backbone.stage1.0', + 'model.2': 'backbone.stage1.1', + 'model.3': 'backbone.stage2.0', + 'model.4': 'backbone.stage2.1', + 'model.5': 'backbone.stage3.0', + 'model.6': 'backbone.stage3.1', + 'model.7': 'backbone.stage4.0', + 'model.8': 'backbone.stage4.1', + 'model.9': 'backbone.stage4.2', + 'model.10': 'neck.reduce_layers.2', + 'model.13': 'neck.top_down_layers.0.0', + 'model.14': 'neck.top_down_layers.0.1', + 'model.17': 'neck.top_down_layers.1', + 'model.18': 'neck.downsample_layers.0', + 'model.20': 'neck.bottom_up_layers.0', + 'model.21': 'neck.downsample_layers.1', + 'model.23': 'neck.bottom_up_layers.1', + 'model.24': 'bbox_head.head_module', +} + + +def convert(src, dst): + """Convert keys in pretrained YOLOv5u models to mmyolo style.""" + convert_dict = convert_dict_p5 + + print('Converting P5 model') + try: + yolov5_model = torch.load(src)['model'] + blobs = yolov5_model.state_dict() + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the ultralytics repo,' + ' because loading the official pretrained model need' + ' `model.py` to build model.') + state_dict = OrderedDict() + + for key, weight in blobs.items(): + + num, module = key.split('.')[1:3] + prefix = f'model.{num}' + new_key = key.replace(prefix, convert_dict[prefix]) + + if '.m.' 
in new_key: + new_key = new_key.replace('.m.', '.blocks.') + new_key = new_key.replace('.cv', '.conv') + elif 'bbox_head.head_module' in new_key: + new_key = new_key.replace('.cv2', '.reg_preds') + new_key = new_key.replace('.cv3', '.cls_preds') + elif 'backbone.stage4.2' in new_key: + new_key = new_key.replace('.cv', '.conv') + else: + new_key = new_key.replace('.cv1', '.main_conv') + new_key = new_key.replace('.cv2', '.short_conv') + new_key = new_key.replace('.cv3', '.final_conv') + + if 'bbox_head.head_module.dfl.conv.weight' == new_key: + print('Drop "bbox_head.head_module.dfl.conv.weight", ' + 'because it is useless') + continue + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +# Note: This script must be placed under the ultralytics repo to run. +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolov5su.pt', help='src yolov5u model path') + parser.add_argument('--dst', default='mmyolov5su.pth', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e86ab46d6cae30aede92ea3598291fbcd009a7 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_to_mmyolo.py @@ -0,0 +1,115 @@ +import argparse +from collections import OrderedDict + +import torch + + +def convert(src, dst): + import sys + sys.path.append('yolov6') + try: + ckpt = torch.load(src, map_location=torch.device('cpu')) + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the meituan/YOLOv6 repo,' + ' because loading the official pretrained model need' + ' some python files to build model.') + # The saved model is the model before reparameterization + model = ckpt['ema' if ckpt.get('ema') else 'model'].float() + new_state_dict = OrderedDict() + for k, v in model.state_dict().items(): + name = k + if 'detect' in k: + if 'proj' in k: + continue + name = k.replace('detect', 'bbox_head.head_module') + if k.find('anchors') >= 0 or k.find('anchor_grid') >= 0: + continue + + if 'ERBlock_2' in k: + name = k.replace('ERBlock_2', 'stage1.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_3' in k: + name = k.replace('ERBlock_3', 'stage2.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_4' in k: + name = k.replace('ERBlock_4', 'stage3.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_5' in k: + name = k.replace('ERBlock_5', 'stage4.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + if 'stage4.0.2' in name: + name = name.replace('stage4.0.2', 'stage4.1') + name = name.replace('cv', 'conv') + elif 'reduce_layer0' in k: + name = k.replace('reduce_layer0', 'reduce_layers.2') + elif 'Rep_p4' in k: + name = k.replace('Rep_p4', 'top_down_layers.0.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' 
in k: + name = name.replace('.m.', '.block.') + elif 'reduce_layer1' in k: + name = k.replace('reduce_layer1', 'top_down_layers.0.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'Rep_p3' in k: + name = k.replace('Rep_p3', 'top_down_layers.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'upsample0' in k: + name = k.replace('upsample0.upsample_transpose', + 'upsample_layers.0') + elif 'upsample1' in k: + name = k.replace('upsample1.upsample_transpose', + 'upsample_layers.1') + elif 'Rep_n3' in k: + name = k.replace('Rep_n3', 'bottom_up_layers.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'Rep_n4' in k: + name = k.replace('Rep_n4', 'bottom_up_layers.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'downsample2' in k: + name = k.replace('downsample2', 'downsample_layers.0') + elif 'downsample1' in k: + name = k.replace('downsample1', 'downsample_layers.1') + + new_state_dict[name] = v + data = {'state_dict': new_state_dict} + torch.save(data, dst) + + +# Note: This script must be placed under the yolov6 repo to run. +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolov6s.pt', help='src yolov6 model path') + parser.add_argument('--dst', default='mmyolov6.pt', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_v3_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_v3_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..bc87664241eb699454c165aa1d760d1da910f7dd --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov6_v3_to_mmyolo.py @@ -0,0 +1,145 @@ +import argparse +from collections import OrderedDict + +import torch + + +def convert(src, dst): + import sys + sys.path.append('yolov6') + try: + ckpt = torch.load(src, map_location=torch.device('cpu')) + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the meituan/YOLOv6 repo,' + ' because loading the official pretrained model need' + ' some python files to build model.') + # The saved model is the model before reparameterization + model = ckpt['ema' if ckpt.get('ema') else 'model'].float() + new_state_dict = OrderedDict() + is_ns = False + for k, v in model.state_dict().items(): + name = k + if 'detect' in k: + if 'proj' in k: + continue + if 'reg_preds_lrtb' in k: + is_ns = True + name = k.replace('detect', 'bbox_head.head_module') + if k.find('anchors') >= 0 or k.find('anchor_grid') >= 0: + continue + + if 'ERBlock_2' in k: + name = k.replace('ERBlock_2', 'stage1.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_3' in k: + name = k.replace('ERBlock_3', 'stage2.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_4' in k: + name = k.replace('ERBlock_4', 'stage3.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' 
in k: + name = name.replace('.m.', '.block.') + elif 'ERBlock_5' in k: + name = k.replace('ERBlock_5', 'stage4.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + if 'stage4.0.2' in name: + name = name.replace('stage4.0.2', 'stage4.1') + name = name.replace('cv', 'conv') + elif 'reduce_layer0' in k: + name = k.replace('reduce_layer0', 'reduce_layers.2') + elif 'Rep_p4' in k: + name = k.replace('Rep_p4', 'top_down_layers.0.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'reduce_layer1' in k: + name = k.replace('reduce_layer1', 'top_down_layers.0.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'Rep_p3' in k: + name = k.replace('Rep_p3', 'top_down_layers.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'Bifusion0' in k: + name = k.replace('Bifusion0', 'upsample_layers.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + if '.upsample_transpose.' in k: + name = name.replace('.upsample_transpose.', '.') + elif 'Bifusion1' in k: + name = k.replace('Bifusion1', 'upsample_layers.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + if '.upsample_transpose.' in k: + name = name.replace('.upsample_transpose.', '.') + elif 'Rep_n3' in k: + name = k.replace('Rep_n3', 'bottom_up_layers.0') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'Rep_n4' in k: + name = k.replace('Rep_n4', 'bottom_up_layers.1') + if '.cv' in k: + name = name.replace('.cv', '.conv') + if '.m.' in k: + name = name.replace('.m.', '.block.') + elif 'downsample2' in k: + name = k.replace('downsample2', 'downsample_layers.0') + elif 'downsample1' in k: + name = k.replace('downsample1', 'downsample_layers.1') + + new_state_dict[name] = v + + # The yolov6_v3_n/s has two regression heads. + # One called 'reg_preds_lrtb' is a regular anchor-free head, + # which is used for inference. + # One called 'reg_preds' is a DFL style head, which + # is only used in training. + if is_ns: + tmp_state_dict = OrderedDict() + for k, v in new_state_dict.items(): + name = k + if 'reg_preds_lrtb' in k: + name = k.replace('reg_preds_lrtb', 'reg_preds') + elif 'reg_preds' in k: + name = k.replace('reg_preds', 'distill_ns_head') + tmp_state_dict[name] = v + new_state_dict = tmp_state_dict + + data = {'state_dict': new_state_dict} + torch.save(data, dst) + + +# Note: This script must be placed under the yolov6 repo to run. 
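+# A minimal usage sketch (hypothetical file names, assuming the argparse
+# defaults below): run this from inside the meituan/YOLOv6 checkout so the
+# pickled checkpoint can rebuild its modules, e.g.
+#   python yolov6_v3_to_mmyolo.py --src yolov6s.pt --dst mmyolov6.pt
+# The saved file holds a single 'state_dict' entry, which an mmyolo config
+# would typically consume through its `load_from` field.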
+def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolov6s.pt', help='src yolov6 model path') + parser.add_argument('--dst', default='mmyolov6.pt', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov7_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov7_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..1c1f54d0cbf0375dc026c8e6fb234ce9335d85cc --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov7_to_mmyolo.py @@ -0,0 +1,1093 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import torch + +convert_dict_tiny = { + # stem + 'model.0': 'backbone.stem.0', + 'model.1': 'backbone.stem.1', + + # stage1 TinyDownSampleBlock + 'model.2': 'backbone.stage1.0.short_conv', + 'model.3': 'backbone.stage1.0.main_convs.0', + 'model.4': 'backbone.stage1.0.main_convs.1', + 'model.5': 'backbone.stage1.0.main_convs.2', + 'model.7': 'backbone.stage1.0.final_conv', + + # stage2 TinyDownSampleBlock + 'model.9': 'backbone.stage2.1.short_conv', + 'model.10': 'backbone.stage2.1.main_convs.0', + 'model.11': 'backbone.stage2.1.main_convs.1', + 'model.12': 'backbone.stage2.1.main_convs.2', + 'model.14': 'backbone.stage2.1.final_conv', + + # stage3 TinyDownSampleBlock + 'model.16': 'backbone.stage3.1.short_conv', + 'model.17': 'backbone.stage3.1.main_convs.0', + 'model.18': 'backbone.stage3.1.main_convs.1', + 'model.19': 'backbone.stage3.1.main_convs.2', + 'model.21': 'backbone.stage3.1.final_conv', + + # stage4 TinyDownSampleBlock + 'model.23': 'backbone.stage4.1.short_conv', + 'model.24': 'backbone.stage4.1.main_convs.0', + 'model.25': 'backbone.stage4.1.main_convs.1', + 'model.26': 'backbone.stage4.1.main_convs.2', + 'model.28': 'backbone.stage4.1.final_conv', + + # neck SPPCSPBlock + 'model.29': 'neck.reduce_layers.2.short_layer', + 'model.30': 'neck.reduce_layers.2.main_layers', + 'model.35': 'neck.reduce_layers.2.fuse_layers', + 'model.37': 'neck.reduce_layers.2.final_conv', + 'model.38': 'neck.upsample_layers.0.0', + 'model.40': 'neck.reduce_layers.1', + 'model.42': 'neck.top_down_layers.0.short_conv', + 'model.43': 'neck.top_down_layers.0.main_convs.0', + 'model.44': 'neck.top_down_layers.0.main_convs.1', + 'model.45': 'neck.top_down_layers.0.main_convs.2', + 'model.47': 'neck.top_down_layers.0.final_conv', + 'model.48': 'neck.upsample_layers.1.0', + 'model.50': 'neck.reduce_layers.0', + 'model.52': 'neck.top_down_layers.1.short_conv', + 'model.53': 'neck.top_down_layers.1.main_convs.0', + 'model.54': 'neck.top_down_layers.1.main_convs.1', + 'model.55': 'neck.top_down_layers.1.main_convs.2', + 'model.57': 'neck.top_down_layers.1.final_conv', + 'model.58': 'neck.downsample_layers.0', + 'model.60': 'neck.bottom_up_layers.0.short_conv', + 'model.61': 'neck.bottom_up_layers.0.main_convs.0', + 'model.62': 'neck.bottom_up_layers.0.main_convs.1', + 'model.63': 'neck.bottom_up_layers.0.main_convs.2', + 'model.65': 'neck.bottom_up_layers.0.final_conv', + 'model.66': 'neck.downsample_layers.1', + 'model.68': 'neck.bottom_up_layers.1.short_conv', + 'model.69': 'neck.bottom_up_layers.1.main_convs.0', + 'model.70': 'neck.bottom_up_layers.1.main_convs.1', + 'model.71': 'neck.bottom_up_layers.1.main_convs.2', + 'model.73': 
'neck.bottom_up_layers.1.final_conv', + 'model.74': 'neck.out_layers.0', + 'model.75': 'neck.out_layers.1', + 'model.76': 'neck.out_layers.2', + + # head + 'model.77.m.0': 'bbox_head.head_module.convs_pred.0.1', + 'model.77.m.1': 'bbox_head.head_module.convs_pred.1.1', + 'model.77.m.2': 'bbox_head.head_module.convs_pred.2.1' +} + +convert_dict_l = { + # stem + 'model.0': 'backbone.stem.0', + 'model.1': 'backbone.stem.1', + 'model.2': 'backbone.stem.2', + + # stage1 + # ConvModule + 'model.3': 'backbone.stage1.0', + # ELANBlock expand_channel_2x + 'model.4': 'backbone.stage1.1.short_conv', + 'model.5': 'backbone.stage1.1.main_conv', + 'model.6': 'backbone.stage1.1.blocks.0.0', + 'model.7': 'backbone.stage1.1.blocks.0.1', + 'model.8': 'backbone.stage1.1.blocks.1.0', + 'model.9': 'backbone.stage1.1.blocks.1.1', + 'model.11': 'backbone.stage1.1.final_conv', + + # stage2 + # MaxPoolBlock reduce_channel_2x + 'model.13': 'backbone.stage2.0.maxpool_branches.1', + 'model.14': 'backbone.stage2.0.stride_conv_branches.0', + 'model.15': 'backbone.stage2.0.stride_conv_branches.1', + # ELANBlock expand_channel_2x + 'model.17': 'backbone.stage2.1.short_conv', + 'model.18': 'backbone.stage2.1.main_conv', + 'model.19': 'backbone.stage2.1.blocks.0.0', + 'model.20': 'backbone.stage2.1.blocks.0.1', + 'model.21': 'backbone.stage2.1.blocks.1.0', + 'model.22': 'backbone.stage2.1.blocks.1.1', + 'model.24': 'backbone.stage2.1.final_conv', + + # stage3 + # MaxPoolBlock reduce_channel_2x + 'model.26': 'backbone.stage3.0.maxpool_branches.1', + 'model.27': 'backbone.stage3.0.stride_conv_branches.0', + 'model.28': 'backbone.stage3.0.stride_conv_branches.1', + # ELANBlock expand_channel_2x + 'model.30': 'backbone.stage3.1.short_conv', + 'model.31': 'backbone.stage3.1.main_conv', + 'model.32': 'backbone.stage3.1.blocks.0.0', + 'model.33': 'backbone.stage3.1.blocks.0.1', + 'model.34': 'backbone.stage3.1.blocks.1.0', + 'model.35': 'backbone.stage3.1.blocks.1.1', + 'model.37': 'backbone.stage3.1.final_conv', + + # stage4 + # MaxPoolBlock reduce_channel_2x + 'model.39': 'backbone.stage4.0.maxpool_branches.1', + 'model.40': 'backbone.stage4.0.stride_conv_branches.0', + 'model.41': 'backbone.stage4.0.stride_conv_branches.1', + # ELANBlock no_change_channel + 'model.43': 'backbone.stage4.1.short_conv', + 'model.44': 'backbone.stage4.1.main_conv', + 'model.45': 'backbone.stage4.1.blocks.0.0', + 'model.46': 'backbone.stage4.1.blocks.0.1', + 'model.47': 'backbone.stage4.1.blocks.1.0', + 'model.48': 'backbone.stage4.1.blocks.1.1', + 'model.50': 'backbone.stage4.1.final_conv', + + # neck SPPCSPBlock + 'model.51.cv1': 'neck.reduce_layers.2.main_layers.0', + 'model.51.cv3': 'neck.reduce_layers.2.main_layers.1', + 'model.51.cv4': 'neck.reduce_layers.2.main_layers.2', + 'model.51.cv5': 'neck.reduce_layers.2.fuse_layers.0', + 'model.51.cv6': 'neck.reduce_layers.2.fuse_layers.1', + 'model.51.cv2': 'neck.reduce_layers.2.short_layer', + 'model.51.cv7': 'neck.reduce_layers.2.final_conv', + + # neck + 'model.52': 'neck.upsample_layers.0.0', + 'model.54': 'neck.reduce_layers.1', + + # neck ELANBlock reduce_channel_2x + 'model.56': 'neck.top_down_layers.0.short_conv', + 'model.57': 'neck.top_down_layers.0.main_conv', + 'model.58': 'neck.top_down_layers.0.blocks.0', + 'model.59': 'neck.top_down_layers.0.blocks.1', + 'model.60': 'neck.top_down_layers.0.blocks.2', + 'model.61': 'neck.top_down_layers.0.blocks.3', + 'model.63': 'neck.top_down_layers.0.final_conv', + 'model.64': 'neck.upsample_layers.1.0', + 'model.66': 'neck.reduce_layers.0', + + # 
neck ELANBlock reduce_channel_2x + 'model.68': 'neck.top_down_layers.1.short_conv', + 'model.69': 'neck.top_down_layers.1.main_conv', + 'model.70': 'neck.top_down_layers.1.blocks.0', + 'model.71': 'neck.top_down_layers.1.blocks.1', + 'model.72': 'neck.top_down_layers.1.blocks.2', + 'model.73': 'neck.top_down_layers.1.blocks.3', + 'model.75': 'neck.top_down_layers.1.final_conv', + + # neck MaxPoolBlock no_change_channel + 'model.77': 'neck.downsample_layers.0.maxpool_branches.1', + 'model.78': 'neck.downsample_layers.0.stride_conv_branches.0', + 'model.79': 'neck.downsample_layers.0.stride_conv_branches.1', + + # neck ELANBlock reduce_channel_2x + 'model.81': 'neck.bottom_up_layers.0.short_conv', + 'model.82': 'neck.bottom_up_layers.0.main_conv', + 'model.83': 'neck.bottom_up_layers.0.blocks.0', + 'model.84': 'neck.bottom_up_layers.0.blocks.1', + 'model.85': 'neck.bottom_up_layers.0.blocks.2', + 'model.86': 'neck.bottom_up_layers.0.blocks.3', + 'model.88': 'neck.bottom_up_layers.0.final_conv', + + # neck MaxPoolBlock no_change_channel + 'model.90': 'neck.downsample_layers.1.maxpool_branches.1', + 'model.91': 'neck.downsample_layers.1.stride_conv_branches.0', + 'model.92': 'neck.downsample_layers.1.stride_conv_branches.1', + + # neck ELANBlock reduce_channel_2x + 'model.94': 'neck.bottom_up_layers.1.short_conv', + 'model.95': 'neck.bottom_up_layers.1.main_conv', + 'model.96': 'neck.bottom_up_layers.1.blocks.0', + 'model.97': 'neck.bottom_up_layers.1.blocks.1', + 'model.98': 'neck.bottom_up_layers.1.blocks.2', + 'model.99': 'neck.bottom_up_layers.1.blocks.3', + 'model.101': 'neck.bottom_up_layers.1.final_conv', + + # RepVGGBlock + 'model.102.rbr_dense.0': 'neck.out_layers.0.rbr_dense.conv', + 'model.102.rbr_dense.1': 'neck.out_layers.0.rbr_dense.bn', + 'model.102.rbr_1x1.0': 'neck.out_layers.0.rbr_1x1.conv', + 'model.102.rbr_1x1.1': 'neck.out_layers.0.rbr_1x1.bn', + 'model.103.rbr_dense.0': 'neck.out_layers.1.rbr_dense.conv', + 'model.103.rbr_dense.1': 'neck.out_layers.1.rbr_dense.bn', + 'model.103.rbr_1x1.0': 'neck.out_layers.1.rbr_1x1.conv', + 'model.103.rbr_1x1.1': 'neck.out_layers.1.rbr_1x1.bn', + 'model.104.rbr_dense.0': 'neck.out_layers.2.rbr_dense.conv', + 'model.104.rbr_dense.1': 'neck.out_layers.2.rbr_dense.bn', + 'model.104.rbr_1x1.0': 'neck.out_layers.2.rbr_1x1.conv', + 'model.104.rbr_1x1.1': 'neck.out_layers.2.rbr_1x1.bn', + + # head + 'model.105.m.0': 'bbox_head.head_module.convs_pred.0.1', + 'model.105.m.1': 'bbox_head.head_module.convs_pred.1.1', + 'model.105.m.2': 'bbox_head.head_module.convs_pred.2.1' +} + +convert_dict_x = { + # stem + 'model.0': 'backbone.stem.0', + 'model.1': 'backbone.stem.1', + 'model.2': 'backbone.stem.2', + + # stage1 + # ConvModule + 'model.3': 'backbone.stage1.0', + # ELANBlock expand_channel_2x + 'model.4': 'backbone.stage1.1.short_conv', + 'model.5': 'backbone.stage1.1.main_conv', + 'model.6': 'backbone.stage1.1.blocks.0.0', + 'model.7': 'backbone.stage1.1.blocks.0.1', + 'model.8': 'backbone.stage1.1.blocks.1.0', + 'model.9': 'backbone.stage1.1.blocks.1.1', + 'model.10': 'backbone.stage1.1.blocks.2.0', + 'model.11': 'backbone.stage1.1.blocks.2.1', + 'model.13': 'backbone.stage1.1.final_conv', + + # stage2 + # MaxPoolBlock reduce_channel_2x + 'model.15': 'backbone.stage2.0.maxpool_branches.1', + 'model.16': 'backbone.stage2.0.stride_conv_branches.0', + 'model.17': 'backbone.stage2.0.stride_conv_branches.1', + + # ELANBlock expand_channel_2x + 'model.19': 'backbone.stage2.1.short_conv', + 'model.20': 'backbone.stage2.1.main_conv', + 'model.21': 
'backbone.stage2.1.blocks.0.0', + 'model.22': 'backbone.stage2.1.blocks.0.1', + 'model.23': 'backbone.stage2.1.blocks.1.0', + 'model.24': 'backbone.stage2.1.blocks.1.1', + 'model.25': 'backbone.stage2.1.blocks.2.0', + 'model.26': 'backbone.stage2.1.blocks.2.1', + 'model.28': 'backbone.stage2.1.final_conv', + + # stage3 + # MaxPoolBlock reduce_channel_2x + 'model.30': 'backbone.stage3.0.maxpool_branches.1', + 'model.31': 'backbone.stage3.0.stride_conv_branches.0', + 'model.32': 'backbone.stage3.0.stride_conv_branches.1', + # ELANBlock expand_channel_2x + 'model.34': 'backbone.stage3.1.short_conv', + 'model.35': 'backbone.stage3.1.main_conv', + 'model.36': 'backbone.stage3.1.blocks.0.0', + 'model.37': 'backbone.stage3.1.blocks.0.1', + 'model.38': 'backbone.stage3.1.blocks.1.0', + 'model.39': 'backbone.stage3.1.blocks.1.1', + 'model.40': 'backbone.stage3.1.blocks.2.0', + 'model.41': 'backbone.stage3.1.blocks.2.1', + 'model.43': 'backbone.stage3.1.final_conv', + + # stage4 + # MaxPoolBlock reduce_channel_2x + 'model.45': 'backbone.stage4.0.maxpool_branches.1', + 'model.46': 'backbone.stage4.0.stride_conv_branches.0', + 'model.47': 'backbone.stage4.0.stride_conv_branches.1', + # ELANBlock no_change_channel + 'model.49': 'backbone.stage4.1.short_conv', + 'model.50': 'backbone.stage4.1.main_conv', + 'model.51': 'backbone.stage4.1.blocks.0.0', + 'model.52': 'backbone.stage4.1.blocks.0.1', + 'model.53': 'backbone.stage4.1.blocks.1.0', + 'model.54': 'backbone.stage4.1.blocks.1.1', + 'model.55': 'backbone.stage4.1.blocks.2.0', + 'model.56': 'backbone.stage4.1.blocks.2.1', + 'model.58': 'backbone.stage4.1.final_conv', + + # neck SPPCSPBlock + 'model.59.cv1': 'neck.reduce_layers.2.main_layers.0', + 'model.59.cv3': 'neck.reduce_layers.2.main_layers.1', + 'model.59.cv4': 'neck.reduce_layers.2.main_layers.2', + 'model.59.cv5': 'neck.reduce_layers.2.fuse_layers.0', + 'model.59.cv6': 'neck.reduce_layers.2.fuse_layers.1', + 'model.59.cv2': 'neck.reduce_layers.2.short_layer', + 'model.59.cv7': 'neck.reduce_layers.2.final_conv', + + # neck + 'model.60': 'neck.upsample_layers.0.0', + 'model.62': 'neck.reduce_layers.1', + + # neck ELANBlock reduce_channel_2x + 'model.64': 'neck.top_down_layers.0.short_conv', + 'model.65': 'neck.top_down_layers.0.main_conv', + 'model.66': 'neck.top_down_layers.0.blocks.0.0', + 'model.67': 'neck.top_down_layers.0.blocks.0.1', + 'model.68': 'neck.top_down_layers.0.blocks.1.0', + 'model.69': 'neck.top_down_layers.0.blocks.1.1', + 'model.70': 'neck.top_down_layers.0.blocks.2.0', + 'model.71': 'neck.top_down_layers.0.blocks.2.1', + 'model.73': 'neck.top_down_layers.0.final_conv', + 'model.74': 'neck.upsample_layers.1.0', + 'model.76': 'neck.reduce_layers.0', + + # neck ELANBlock reduce_channel_2x + 'model.78': 'neck.top_down_layers.1.short_conv', + 'model.79': 'neck.top_down_layers.1.main_conv', + 'model.80': 'neck.top_down_layers.1.blocks.0.0', + 'model.81': 'neck.top_down_layers.1.blocks.0.1', + 'model.82': 'neck.top_down_layers.1.blocks.1.0', + 'model.83': 'neck.top_down_layers.1.blocks.1.1', + 'model.84': 'neck.top_down_layers.1.blocks.2.0', + 'model.85': 'neck.top_down_layers.1.blocks.2.1', + 'model.87': 'neck.top_down_layers.1.final_conv', + + # neck MaxPoolBlock no_change_channel + 'model.89': 'neck.downsample_layers.0.maxpool_branches.1', + 'model.90': 'neck.downsample_layers.0.stride_conv_branches.0', + 'model.91': 'neck.downsample_layers.0.stride_conv_branches.1', + + # neck ELANBlock reduce_channel_2x + 'model.93': 'neck.bottom_up_layers.0.short_conv', + 'model.94': 
'neck.bottom_up_layers.0.main_conv', + 'model.95': 'neck.bottom_up_layers.0.blocks.0.0', + 'model.96': 'neck.bottom_up_layers.0.blocks.0.1', + 'model.97': 'neck.bottom_up_layers.0.blocks.1.0', + 'model.98': 'neck.bottom_up_layers.0.blocks.1.1', + 'model.99': 'neck.bottom_up_layers.0.blocks.2.0', + 'model.100': 'neck.bottom_up_layers.0.blocks.2.1', + 'model.102': 'neck.bottom_up_layers.0.final_conv', + + # neck MaxPoolBlock no_change_channel + 'model.104': 'neck.downsample_layers.1.maxpool_branches.1', + 'model.105': 'neck.downsample_layers.1.stride_conv_branches.0', + 'model.106': 'neck.downsample_layers.1.stride_conv_branches.1', + + # neck ELANBlock reduce_channel_2x + 'model.108': 'neck.bottom_up_layers.1.short_conv', + 'model.109': 'neck.bottom_up_layers.1.main_conv', + 'model.110': 'neck.bottom_up_layers.1.blocks.0.0', + 'model.111': 'neck.bottom_up_layers.1.blocks.0.1', + 'model.112': 'neck.bottom_up_layers.1.blocks.1.0', + 'model.113': 'neck.bottom_up_layers.1.blocks.1.1', + 'model.114': 'neck.bottom_up_layers.1.blocks.2.0', + 'model.115': 'neck.bottom_up_layers.1.blocks.2.1', + 'model.117': 'neck.bottom_up_layers.1.final_conv', + + # Conv + 'model.118': 'neck.out_layers.0', + 'model.119': 'neck.out_layers.1', + 'model.120': 'neck.out_layers.2', + + # head + 'model.121.m.0': 'bbox_head.head_module.convs_pred.0.1', + 'model.121.m.1': 'bbox_head.head_module.convs_pred.1.1', + 'model.121.m.2': 'bbox_head.head_module.convs_pred.2.1' +} + +convert_dict_w = { + # stem + 'model.1': 'backbone.stem.conv', + + # stage1 + # ConvModule + 'model.2': 'backbone.stage1.0', + # ELANBlock + 'model.3': 'backbone.stage1.1.short_conv', + 'model.4': 'backbone.stage1.1.main_conv', + 'model.5': 'backbone.stage1.1.blocks.0.0', + 'model.6': 'backbone.stage1.1.blocks.0.1', + 'model.7': 'backbone.stage1.1.blocks.1.0', + 'model.8': 'backbone.stage1.1.blocks.1.1', + 'model.10': 'backbone.stage1.1.final_conv', + + # stage2 + 'model.11': 'backbone.stage2.0', + # ELANBlock + 'model.12': 'backbone.stage2.1.short_conv', + 'model.13': 'backbone.stage2.1.main_conv', + 'model.14': 'backbone.stage2.1.blocks.0.0', + 'model.15': 'backbone.stage2.1.blocks.0.1', + 'model.16': 'backbone.stage2.1.blocks.1.0', + 'model.17': 'backbone.stage2.1.blocks.1.1', + 'model.19': 'backbone.stage2.1.final_conv', + + # stage3 + 'model.20': 'backbone.stage3.0', + # ELANBlock + 'model.21': 'backbone.stage3.1.short_conv', + 'model.22': 'backbone.stage3.1.main_conv', + 'model.23': 'backbone.stage3.1.blocks.0.0', + 'model.24': 'backbone.stage3.1.blocks.0.1', + 'model.25': 'backbone.stage3.1.blocks.1.0', + 'model.26': 'backbone.stage3.1.blocks.1.1', + 'model.28': 'backbone.stage3.1.final_conv', + + # stage4 + 'model.29': 'backbone.stage4.0', + # ELANBlock + 'model.30': 'backbone.stage4.1.short_conv', + 'model.31': 'backbone.stage4.1.main_conv', + 'model.32': 'backbone.stage4.1.blocks.0.0', + 'model.33': 'backbone.stage4.1.blocks.0.1', + 'model.34': 'backbone.stage4.1.blocks.1.0', + 'model.35': 'backbone.stage4.1.blocks.1.1', + 'model.37': 'backbone.stage4.1.final_conv', + + # stage5 + 'model.38': 'backbone.stage5.0', + # ELANBlock + 'model.39': 'backbone.stage5.1.short_conv', + 'model.40': 'backbone.stage5.1.main_conv', + 'model.41': 'backbone.stage5.1.blocks.0.0', + 'model.42': 'backbone.stage5.1.blocks.0.1', + 'model.43': 'backbone.stage5.1.blocks.1.0', + 'model.44': 'backbone.stage5.1.blocks.1.1', + 'model.46': 'backbone.stage5.1.final_conv', + + # neck SPPCSPBlock + 'model.47.cv1': 'neck.reduce_layers.3.main_layers.0', + 'model.47.cv3': 
'neck.reduce_layers.3.main_layers.1', + 'model.47.cv4': 'neck.reduce_layers.3.main_layers.2', + 'model.47.cv5': 'neck.reduce_layers.3.fuse_layers.0', + 'model.47.cv6': 'neck.reduce_layers.3.fuse_layers.1', + 'model.47.cv2': 'neck.reduce_layers.3.short_layer', + 'model.47.cv7': 'neck.reduce_layers.3.final_conv', + + # neck + 'model.48': 'neck.upsample_layers.0.0', + 'model.50': 'neck.reduce_layers.2', + + # neck ELANBlock + 'model.52': 'neck.top_down_layers.0.short_conv', + 'model.53': 'neck.top_down_layers.0.main_conv', + 'model.54': 'neck.top_down_layers.0.blocks.0', + 'model.55': 'neck.top_down_layers.0.blocks.1', + 'model.56': 'neck.top_down_layers.0.blocks.2', + 'model.57': 'neck.top_down_layers.0.blocks.3', + 'model.59': 'neck.top_down_layers.0.final_conv', + 'model.60': 'neck.upsample_layers.1.0', + 'model.62': 'neck.reduce_layers.1', + + # neck ELANBlock reduce_channel_2x + 'model.64': 'neck.top_down_layers.1.short_conv', + 'model.65': 'neck.top_down_layers.1.main_conv', + 'model.66': 'neck.top_down_layers.1.blocks.0', + 'model.67': 'neck.top_down_layers.1.blocks.1', + 'model.68': 'neck.top_down_layers.1.blocks.2', + 'model.69': 'neck.top_down_layers.1.blocks.3', + 'model.71': 'neck.top_down_layers.1.final_conv', + 'model.72': 'neck.upsample_layers.2.0', + 'model.74': 'neck.reduce_layers.0', + 'model.76': 'neck.top_down_layers.2.short_conv', + 'model.77': 'neck.top_down_layers.2.main_conv', + 'model.78': 'neck.top_down_layers.2.blocks.0', + 'model.79': 'neck.top_down_layers.2.blocks.1', + 'model.80': 'neck.top_down_layers.2.blocks.2', + 'model.81': 'neck.top_down_layers.2.blocks.3', + 'model.83': 'neck.top_down_layers.2.final_conv', + 'model.84': 'neck.downsample_layers.0', + + # neck ELANBlock + 'model.86': 'neck.bottom_up_layers.0.short_conv', + 'model.87': 'neck.bottom_up_layers.0.main_conv', + 'model.88': 'neck.bottom_up_layers.0.blocks.0', + 'model.89': 'neck.bottom_up_layers.0.blocks.1', + 'model.90': 'neck.bottom_up_layers.0.blocks.2', + 'model.91': 'neck.bottom_up_layers.0.blocks.3', + 'model.93': 'neck.bottom_up_layers.0.final_conv', + 'model.94': 'neck.downsample_layers.1', + + # neck ELANBlock reduce_channel_2x + 'model.96': 'neck.bottom_up_layers.1.short_conv', + 'model.97': 'neck.bottom_up_layers.1.main_conv', + 'model.98': 'neck.bottom_up_layers.1.blocks.0', + 'model.99': 'neck.bottom_up_layers.1.blocks.1', + 'model.100': 'neck.bottom_up_layers.1.blocks.2', + 'model.101': 'neck.bottom_up_layers.1.blocks.3', + 'model.103': 'neck.bottom_up_layers.1.final_conv', + 'model.104': 'neck.downsample_layers.2', + + # neck ELANBlock reduce_channel_2x + 'model.106': 'neck.bottom_up_layers.2.short_conv', + 'model.107': 'neck.bottom_up_layers.2.main_conv', + 'model.108': 'neck.bottom_up_layers.2.blocks.0', + 'model.109': 'neck.bottom_up_layers.2.blocks.1', + 'model.110': 'neck.bottom_up_layers.2.blocks.2', + 'model.111': 'neck.bottom_up_layers.2.blocks.3', + 'model.113': 'neck.bottom_up_layers.2.final_conv', + 'model.114': 'bbox_head.head_module.main_convs_pred.0.0', + 'model.115': 'bbox_head.head_module.main_convs_pred.1.0', + 'model.116': 'bbox_head.head_module.main_convs_pred.2.0', + 'model.117': 'bbox_head.head_module.main_convs_pred.3.0', + + # head + 'model.118.m.0': 'bbox_head.head_module.main_convs_pred.0.2', + 'model.118.m.1': 'bbox_head.head_module.main_convs_pred.1.2', + 'model.118.m.2': 'bbox_head.head_module.main_convs_pred.2.2', + 'model.118.m.3': 'bbox_head.head_module.main_convs_pred.3.2' +} + +convert_dict_e = { + # stem + 'model.1': 'backbone.stem.conv', + + # 
stage1 + 'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0', + 'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1', + 'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1', + + # ELANBlock + 'model.3': 'backbone.stage1.1.short_conv', + 'model.4': 'backbone.stage1.1.main_conv', + 'model.5': 'backbone.stage1.1.blocks.0.0', + 'model.6': 'backbone.stage1.1.blocks.0.1', + 'model.7': 'backbone.stage1.1.blocks.1.0', + 'model.8': 'backbone.stage1.1.blocks.1.1', + 'model.9': 'backbone.stage1.1.blocks.2.0', + 'model.10': 'backbone.stage1.1.blocks.2.1', + 'model.12': 'backbone.stage1.1.final_conv', + + # stage2 + 'model.13.cv1': 'backbone.stage2.0.stride_conv_branches.0', + 'model.13.cv2': 'backbone.stage2.0.stride_conv_branches.1', + 'model.13.cv3': 'backbone.stage2.0.maxpool_branches.1', + + # ELANBlock + 'model.14': 'backbone.stage2.1.short_conv', + 'model.15': 'backbone.stage2.1.main_conv', + 'model.16': 'backbone.stage2.1.blocks.0.0', + 'model.17': 'backbone.stage2.1.blocks.0.1', + 'model.18': 'backbone.stage2.1.blocks.1.0', + 'model.19': 'backbone.stage2.1.blocks.1.1', + 'model.20': 'backbone.stage2.1.blocks.2.0', + 'model.21': 'backbone.stage2.1.blocks.2.1', + 'model.23': 'backbone.stage2.1.final_conv', + + # stage3 + 'model.24.cv1': 'backbone.stage3.0.stride_conv_branches.0', + 'model.24.cv2': 'backbone.stage3.0.stride_conv_branches.1', + 'model.24.cv3': 'backbone.stage3.0.maxpool_branches.1', + + # ELANBlock + 'model.25': 'backbone.stage3.1.short_conv', + 'model.26': 'backbone.stage3.1.main_conv', + 'model.27': 'backbone.stage3.1.blocks.0.0', + 'model.28': 'backbone.stage3.1.blocks.0.1', + 'model.29': 'backbone.stage3.1.blocks.1.0', + 'model.30': 'backbone.stage3.1.blocks.1.1', + 'model.31': 'backbone.stage3.1.blocks.2.0', + 'model.32': 'backbone.stage3.1.blocks.2.1', + 'model.34': 'backbone.stage3.1.final_conv', + + # stage4 + 'model.35.cv1': 'backbone.stage4.0.stride_conv_branches.0', + 'model.35.cv2': 'backbone.stage4.0.stride_conv_branches.1', + 'model.35.cv3': 'backbone.stage4.0.maxpool_branches.1', + + # ELANBlock + 'model.36': 'backbone.stage4.1.short_conv', + 'model.37': 'backbone.stage4.1.main_conv', + 'model.38': 'backbone.stage4.1.blocks.0.0', + 'model.39': 'backbone.stage4.1.blocks.0.1', + 'model.40': 'backbone.stage4.1.blocks.1.0', + 'model.41': 'backbone.stage4.1.blocks.1.1', + 'model.42': 'backbone.stage4.1.blocks.2.0', + 'model.43': 'backbone.stage4.1.blocks.2.1', + 'model.45': 'backbone.stage4.1.final_conv', + + # stage5 + 'model.46.cv1': 'backbone.stage5.0.stride_conv_branches.0', + 'model.46.cv2': 'backbone.stage5.0.stride_conv_branches.1', + 'model.46.cv3': 'backbone.stage5.0.maxpool_branches.1', + + # ELANBlock + 'model.47': 'backbone.stage5.1.short_conv', + 'model.48': 'backbone.stage5.1.main_conv', + 'model.49': 'backbone.stage5.1.blocks.0.0', + 'model.50': 'backbone.stage5.1.blocks.0.1', + 'model.51': 'backbone.stage5.1.blocks.1.0', + 'model.52': 'backbone.stage5.1.blocks.1.1', + 'model.53': 'backbone.stage5.1.blocks.2.0', + 'model.54': 'backbone.stage5.1.blocks.2.1', + 'model.56': 'backbone.stage5.1.final_conv', + + # neck SPPCSPBlock + 'model.57.cv1': 'neck.reduce_layers.3.main_layers.0', + 'model.57.cv3': 'neck.reduce_layers.3.main_layers.1', + 'model.57.cv4': 'neck.reduce_layers.3.main_layers.2', + 'model.57.cv5': 'neck.reduce_layers.3.fuse_layers.0', + 'model.57.cv6': 'neck.reduce_layers.3.fuse_layers.1', + 'model.57.cv2': 'neck.reduce_layers.3.short_layer', + 'model.57.cv7': 'neck.reduce_layers.3.final_conv', + + # neck + 'model.58': 
'neck.upsample_layers.0.0', + 'model.60': 'neck.reduce_layers.2', + + # neck ELANBlock + 'model.62': 'neck.top_down_layers.0.short_conv', + 'model.63': 'neck.top_down_layers.0.main_conv', + 'model.64': 'neck.top_down_layers.0.blocks.0', + 'model.65': 'neck.top_down_layers.0.blocks.1', + 'model.66': 'neck.top_down_layers.0.blocks.2', + 'model.67': 'neck.top_down_layers.0.blocks.3', + 'model.68': 'neck.top_down_layers.0.blocks.4', + 'model.69': 'neck.top_down_layers.0.blocks.5', + 'model.71': 'neck.top_down_layers.0.final_conv', + 'model.72': 'neck.upsample_layers.1.0', + 'model.74': 'neck.reduce_layers.1', + + # neck ELANBlock + 'model.76': 'neck.top_down_layers.1.short_conv', + 'model.77': 'neck.top_down_layers.1.main_conv', + 'model.78': 'neck.top_down_layers.1.blocks.0', + 'model.79': 'neck.top_down_layers.1.blocks.1', + 'model.80': 'neck.top_down_layers.1.blocks.2', + 'model.81': 'neck.top_down_layers.1.blocks.3', + 'model.82': 'neck.top_down_layers.1.blocks.4', + 'model.83': 'neck.top_down_layers.1.blocks.5', + 'model.85': 'neck.top_down_layers.1.final_conv', + 'model.86': 'neck.upsample_layers.2.0', + 'model.88': 'neck.reduce_layers.0', + 'model.90': 'neck.top_down_layers.2.short_conv', + 'model.91': 'neck.top_down_layers.2.main_conv', + 'model.92': 'neck.top_down_layers.2.blocks.0', + 'model.93': 'neck.top_down_layers.2.blocks.1', + 'model.94': 'neck.top_down_layers.2.blocks.2', + 'model.95': 'neck.top_down_layers.2.blocks.3', + 'model.96': 'neck.top_down_layers.2.blocks.4', + 'model.97': 'neck.top_down_layers.2.blocks.5', + 'model.99': 'neck.top_down_layers.2.final_conv', + 'model.100.cv1': 'neck.downsample_layers.0.stride_conv_branches.0', + 'model.100.cv2': 'neck.downsample_layers.0.stride_conv_branches.1', + 'model.100.cv3': 'neck.downsample_layers.0.maxpool_branches.1', + + # neck ELANBlock + 'model.102': 'neck.bottom_up_layers.0.short_conv', + 'model.103': 'neck.bottom_up_layers.0.main_conv', + 'model.104': 'neck.bottom_up_layers.0.blocks.0', + 'model.105': 'neck.bottom_up_layers.0.blocks.1', + 'model.106': 'neck.bottom_up_layers.0.blocks.2', + 'model.107': 'neck.bottom_up_layers.0.blocks.3', + 'model.108': 'neck.bottom_up_layers.0.blocks.4', + 'model.109': 'neck.bottom_up_layers.0.blocks.5', + 'model.111': 'neck.bottom_up_layers.0.final_conv', + 'model.112.cv1': 'neck.downsample_layers.1.stride_conv_branches.0', + 'model.112.cv2': 'neck.downsample_layers.1.stride_conv_branches.1', + 'model.112.cv3': 'neck.downsample_layers.1.maxpool_branches.1', + + # neck ELANBlock + 'model.114': 'neck.bottom_up_layers.1.short_conv', + 'model.115': 'neck.bottom_up_layers.1.main_conv', + 'model.116': 'neck.bottom_up_layers.1.blocks.0', + 'model.117': 'neck.bottom_up_layers.1.blocks.1', + 'model.118': 'neck.bottom_up_layers.1.blocks.2', + 'model.119': 'neck.bottom_up_layers.1.blocks.3', + 'model.120': 'neck.bottom_up_layers.1.blocks.4', + 'model.121': 'neck.bottom_up_layers.1.blocks.5', + 'model.123': 'neck.bottom_up_layers.1.final_conv', + 'model.124.cv1': 'neck.downsample_layers.2.stride_conv_branches.0', + 'model.124.cv2': 'neck.downsample_layers.2.stride_conv_branches.1', + 'model.124.cv3': 'neck.downsample_layers.2.maxpool_branches.1', + + # neck ELANBlock + 'model.126': 'neck.bottom_up_layers.2.short_conv', + 'model.127': 'neck.bottom_up_layers.2.main_conv', + 'model.128': 'neck.bottom_up_layers.2.blocks.0', + 'model.129': 'neck.bottom_up_layers.2.blocks.1', + 'model.130': 'neck.bottom_up_layers.2.blocks.2', + 'model.131': 'neck.bottom_up_layers.2.blocks.3', + 'model.132': 
'neck.bottom_up_layers.2.blocks.4', + 'model.133': 'neck.bottom_up_layers.2.blocks.5', + 'model.135': 'neck.bottom_up_layers.2.final_conv', + 'model.136': 'bbox_head.head_module.main_convs_pred.0.0', + 'model.137': 'bbox_head.head_module.main_convs_pred.1.0', + 'model.138': 'bbox_head.head_module.main_convs_pred.2.0', + 'model.139': 'bbox_head.head_module.main_convs_pred.3.0', + + # head + 'model.140.m.0': 'bbox_head.head_module.main_convs_pred.0.2', + 'model.140.m.1': 'bbox_head.head_module.main_convs_pred.1.2', + 'model.140.m.2': 'bbox_head.head_module.main_convs_pred.2.2', + 'model.140.m.3': 'bbox_head.head_module.main_convs_pred.3.2' +} + +convert_dict_e2e = { + # stem + 'model.1': 'backbone.stem.conv', + + # stage1 + 'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0', + 'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1', + 'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1', + + # E-ELANBlock + 'model.3': 'backbone.stage1.1.e_elan_blocks.0.short_conv', + 'model.4': 'backbone.stage1.1.e_elan_blocks.0.main_conv', + 'model.5': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.0', + 'model.6': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.1', + 'model.7': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.0', + 'model.8': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.1', + 'model.9': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.0', + 'model.10': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.1', + 'model.12': 'backbone.stage1.1.e_elan_blocks.0.final_conv', + 'model.13': 'backbone.stage1.1.e_elan_blocks.1.short_conv', + 'model.14': 'backbone.stage1.1.e_elan_blocks.1.main_conv', + 'model.15': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.0', + 'model.16': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.1', + 'model.17': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.0', + 'model.18': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.1', + 'model.19': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.0', + 'model.20': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.1', + 'model.22': 'backbone.stage1.1.e_elan_blocks.1.final_conv', + + # stage2 + 'model.24.cv1': 'backbone.stage2.0.stride_conv_branches.0', + 'model.24.cv2': 'backbone.stage2.0.stride_conv_branches.1', + 'model.24.cv3': 'backbone.stage2.0.maxpool_branches.1', + + # E-ELANBlock + 'model.25': 'backbone.stage2.1.e_elan_blocks.0.short_conv', + 'model.26': 'backbone.stage2.1.e_elan_blocks.0.main_conv', + 'model.27': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.0', + 'model.28': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.1', + 'model.29': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.0', + 'model.30': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.1', + 'model.31': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.0', + 'model.32': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.1', + 'model.34': 'backbone.stage2.1.e_elan_blocks.0.final_conv', + 'model.35': 'backbone.stage2.1.e_elan_blocks.1.short_conv', + 'model.36': 'backbone.stage2.1.e_elan_blocks.1.main_conv', + 'model.37': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.0', + 'model.38': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.1', + 'model.39': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.0', + 'model.40': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.1', + 'model.41': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.0', + 'model.42': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.1', + 'model.44': 'backbone.stage2.1.e_elan_blocks.1.final_conv', + + # stage3 + 'model.46.cv1': 'backbone.stage3.0.stride_conv_branches.0', + 'model.46.cv2': 'backbone.stage3.0.stride_conv_branches.1', + 'model.46.cv3': 
'backbone.stage3.0.maxpool_branches.1', + + # E-ELANBlock + 'model.47': 'backbone.stage3.1.e_elan_blocks.0.short_conv', + 'model.48': 'backbone.stage3.1.e_elan_blocks.0.main_conv', + 'model.49': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.0', + 'model.50': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.1', + 'model.51': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.0', + 'model.52': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.1', + 'model.53': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.0', + 'model.54': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.1', + 'model.56': 'backbone.stage3.1.e_elan_blocks.0.final_conv', + 'model.57': 'backbone.stage3.1.e_elan_blocks.1.short_conv', + 'model.58': 'backbone.stage3.1.e_elan_blocks.1.main_conv', + 'model.59': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.0', + 'model.60': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.1', + 'model.61': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.0', + 'model.62': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.1', + 'model.63': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.0', + 'model.64': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.1', + 'model.66': 'backbone.stage3.1.e_elan_blocks.1.final_conv', + + # stage4 + 'model.68.cv1': 'backbone.stage4.0.stride_conv_branches.0', + 'model.68.cv2': 'backbone.stage4.0.stride_conv_branches.1', + 'model.68.cv3': 'backbone.stage4.0.maxpool_branches.1', + + # E-ELANBlock + 'model.69': 'backbone.stage4.1.e_elan_blocks.0.short_conv', + 'model.70': 'backbone.stage4.1.e_elan_blocks.0.main_conv', + 'model.71': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.0', + 'model.72': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.1', + 'model.73': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.0', + 'model.74': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.1', + 'model.75': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.0', + 'model.76': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.1', + 'model.78': 'backbone.stage4.1.e_elan_blocks.0.final_conv', + 'model.79': 'backbone.stage4.1.e_elan_blocks.1.short_conv', + 'model.80': 'backbone.stage4.1.e_elan_blocks.1.main_conv', + 'model.81': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.0', + 'model.82': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.1', + 'model.83': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.0', + 'model.84': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.1', + 'model.85': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.0', + 'model.86': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.1', + 'model.88': 'backbone.stage4.1.e_elan_blocks.1.final_conv', + + # stage5 + 'model.90.cv1': 'backbone.stage5.0.stride_conv_branches.0', + 'model.90.cv2': 'backbone.stage5.0.stride_conv_branches.1', + 'model.90.cv3': 'backbone.stage5.0.maxpool_branches.1', + + # E-ELANBlock + 'model.91': 'backbone.stage5.1.e_elan_blocks.0.short_conv', + 'model.92': 'backbone.stage5.1.e_elan_blocks.0.main_conv', + 'model.93': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.0', + 'model.94': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.1', + 'model.95': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.0', + 'model.96': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.1', + 'model.97': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.0', + 'model.98': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.1', + 'model.100': 'backbone.stage5.1.e_elan_blocks.0.final_conv', + 'model.101': 'backbone.stage5.1.e_elan_blocks.1.short_conv', + 'model.102': 'backbone.stage5.1.e_elan_blocks.1.main_conv', + 'model.103': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.0', + 'model.104': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.1', + 'model.105': 
'backbone.stage5.1.e_elan_blocks.1.blocks.1.0', + 'model.106': 'backbone.stage5.1.e_elan_blocks.1.blocks.1.1', + 'model.107': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.0', + 'model.108': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.1', + 'model.110': 'backbone.stage5.1.e_elan_blocks.1.final_conv', + + # neck SPPCSPBlock + 'model.112.cv1': 'neck.reduce_layers.3.main_layers.0', + 'model.112.cv3': 'neck.reduce_layers.3.main_layers.1', + 'model.112.cv4': 'neck.reduce_layers.3.main_layers.2', + 'model.112.cv5': 'neck.reduce_layers.3.fuse_layers.0', + 'model.112.cv6': 'neck.reduce_layers.3.fuse_layers.1', + 'model.112.cv2': 'neck.reduce_layers.3.short_layer', + 'model.112.cv7': 'neck.reduce_layers.3.final_conv', + + # neck + 'model.113': 'neck.upsample_layers.0.0', + 'model.115': 'neck.reduce_layers.2', + + # neck E-ELANBlock + 'model.117': 'neck.top_down_layers.0.e_elan_blocks.0.short_conv', + 'model.118': 'neck.top_down_layers.0.e_elan_blocks.0.main_conv', + 'model.119': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.0', + 'model.120': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.1', + 'model.121': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.2', + 'model.122': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.3', + 'model.123': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.4', + 'model.124': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.5', + 'model.126': 'neck.top_down_layers.0.e_elan_blocks.0.final_conv', + 'model.127': 'neck.top_down_layers.0.e_elan_blocks.1.short_conv', + 'model.128': 'neck.top_down_layers.0.e_elan_blocks.1.main_conv', + 'model.129': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.0', + 'model.130': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.1', + 'model.131': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.2', + 'model.132': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.3', + 'model.133': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.4', + 'model.134': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.5', + 'model.136': 'neck.top_down_layers.0.e_elan_blocks.1.final_conv', + 'model.138': 'neck.upsample_layers.1.0', + 'model.140': 'neck.reduce_layers.1', + + # neck E-ELANBlock + 'model.142': 'neck.top_down_layers.1.e_elan_blocks.0.short_conv', + 'model.143': 'neck.top_down_layers.1.e_elan_blocks.0.main_conv', + 'model.144': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.0', + 'model.145': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.1', + 'model.146': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.2', + 'model.147': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.3', + 'model.148': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.4', + 'model.149': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.5', + 'model.151': 'neck.top_down_layers.1.e_elan_blocks.0.final_conv', + 'model.152': 'neck.top_down_layers.1.e_elan_blocks.1.short_conv', + 'model.153': 'neck.top_down_layers.1.e_elan_blocks.1.main_conv', + 'model.154': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.0', + 'model.155': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.1', + 'model.156': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.2', + 'model.157': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.3', + 'model.158': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.4', + 'model.159': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.5', + 'model.161': 'neck.top_down_layers.1.e_elan_blocks.1.final_conv', + 'model.163': 'neck.upsample_layers.2.0', + 'model.165': 'neck.reduce_layers.0', + 'model.167': 'neck.top_down_layers.2.e_elan_blocks.0.short_conv', + 'model.168': 'neck.top_down_layers.2.e_elan_blocks.0.main_conv', + 
'model.169': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.0', + 'model.170': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.1', + 'model.171': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.2', + 'model.172': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.3', + 'model.173': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.4', + 'model.174': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.5', + 'model.176': 'neck.top_down_layers.2.e_elan_blocks.0.final_conv', + 'model.177': 'neck.top_down_layers.2.e_elan_blocks.1.short_conv', + 'model.178': 'neck.top_down_layers.2.e_elan_blocks.1.main_conv', + 'model.179': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.0', + 'model.180': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.1', + 'model.181': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.2', + 'model.182': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.3', + 'model.183': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.4', + 'model.184': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.5', + 'model.186': 'neck.top_down_layers.2.e_elan_blocks.1.final_conv', + 'model.188.cv1': 'neck.downsample_layers.0.stride_conv_branches.0', + 'model.188.cv2': 'neck.downsample_layers.0.stride_conv_branches.1', + 'model.188.cv3': 'neck.downsample_layers.0.maxpool_branches.1', + + # neck E-ELANBlock + 'model.190': 'neck.bottom_up_layers.0.e_elan_blocks.0.short_conv', + 'model.191': 'neck.bottom_up_layers.0.e_elan_blocks.0.main_conv', + 'model.192': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.0', + 'model.193': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.1', + 'model.194': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.2', + 'model.195': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.3', + 'model.196': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.4', + 'model.197': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.5', + 'model.199': 'neck.bottom_up_layers.0.e_elan_blocks.0.final_conv', + 'model.200': 'neck.bottom_up_layers.0.e_elan_blocks.1.short_conv', + 'model.201': 'neck.bottom_up_layers.0.e_elan_blocks.1.main_conv', + 'model.202': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.0', + 'model.203': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.1', + 'model.204': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.2', + 'model.205': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.3', + 'model.206': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.4', + 'model.207': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.5', + 'model.209': 'neck.bottom_up_layers.0.e_elan_blocks.1.final_conv', + 'model.211.cv1': 'neck.downsample_layers.1.stride_conv_branches.0', + 'model.211.cv2': 'neck.downsample_layers.1.stride_conv_branches.1', + 'model.211.cv3': 'neck.downsample_layers.1.maxpool_branches.1', + 'model.213': 'neck.bottom_up_layers.1.e_elan_blocks.0.short_conv', + 'model.214': 'neck.bottom_up_layers.1.e_elan_blocks.0.main_conv', + 'model.215': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.0', + 'model.216': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.1', + 'model.217': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.2', + 'model.218': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.3', + 'model.219': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.4', + 'model.220': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.5', + 'model.222': 'neck.bottom_up_layers.1.e_elan_blocks.0.final_conv', + 'model.223': 'neck.bottom_up_layers.1.e_elan_blocks.1.short_conv', + 'model.224': 'neck.bottom_up_layers.1.e_elan_blocks.1.main_conv', + 'model.225': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.0', + 'model.226': 
'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.1', + 'model.227': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.2', + 'model.228': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.3', + 'model.229': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.4', + 'model.230': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.5', + 'model.232': 'neck.bottom_up_layers.1.e_elan_blocks.1.final_conv', + 'model.234.cv1': 'neck.downsample_layers.2.stride_conv_branches.0', + 'model.234.cv2': 'neck.downsample_layers.2.stride_conv_branches.1', + 'model.234.cv3': 'neck.downsample_layers.2.maxpool_branches.1', + + # neck E-ELANBlock + 'model.236': 'neck.bottom_up_layers.2.e_elan_blocks.0.short_conv', + 'model.237': 'neck.bottom_up_layers.2.e_elan_blocks.0.main_conv', + 'model.238': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.0', + 'model.239': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.1', + 'model.240': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.2', + 'model.241': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.3', + 'model.242': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.4', + 'model.243': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.5', + 'model.245': 'neck.bottom_up_layers.2.e_elan_blocks.0.final_conv', + 'model.246': 'neck.bottom_up_layers.2.e_elan_blocks.1.short_conv', + 'model.247': 'neck.bottom_up_layers.2.e_elan_blocks.1.main_conv', + 'model.248': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.0', + 'model.249': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.1', + 'model.250': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.2', + 'model.251': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.3', + 'model.252': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.4', + 'model.253': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.5', + 'model.255': 'neck.bottom_up_layers.2.e_elan_blocks.1.final_conv', + 'model.257': 'bbox_head.head_module.main_convs_pred.0.0', + 'model.258': 'bbox_head.head_module.main_convs_pred.1.0', + 'model.259': 'bbox_head.head_module.main_convs_pred.2.0', + 'model.260': 'bbox_head.head_module.main_convs_pred.3.0', + + # head + 'model.261.m.0': 'bbox_head.head_module.main_convs_pred.0.2', + 'model.261.m.1': 'bbox_head.head_module.main_convs_pred.1.2', + 'model.261.m.2': 'bbox_head.head_module.main_convs_pred.2.2', + 'model.261.m.3': 'bbox_head.head_module.main_convs_pred.3.2' +} + +convert_dicts = { + 'yolov7-tiny.pt': convert_dict_tiny, + 'yolov7-w6.pt': convert_dict_w, + 'yolov7-e6.pt': convert_dict_e, + 'yolov7-e6e.pt': convert_dict_e2e, + 'yolov7.pt': convert_dict_l, + 'yolov7x.pt': convert_dict_x +} + + +def convert(src, dst): + src_key = osp.basename(src) + convert_dict = convert_dicts[osp.basename(src)] + + num_levels = 3 + if src_key == 'yolov7.pt': + indexes = [102, 51] + in_channels = [256, 512, 1024] + elif src_key == 'yolov7x.pt': + indexes = [121, 59] + in_channels = [320, 640, 1280] + elif src_key == 'yolov7-tiny.pt': + indexes = [77, 1000] + in_channels = [128, 256, 512] + elif src_key == 'yolov7-w6.pt': + indexes = [118, 47] + in_channels = [256, 512, 768, 1024] + num_levels = 4 + elif src_key == 'yolov7-e6.pt': + indexes = [140, [2, 13, 24, 35, 46, 57, 100, 112, 124]] + in_channels = 320, 640, 960, 1280 + num_levels = 4 + elif src_key == 'yolov7-e6e.pt': + indexes = [261, [2, 24, 46, 68, 90, 112, 188, 211, 234]] + in_channels = 320, 640, 960, 1280 + num_levels = 4 + + if isinstance(indexes[1], int): + indexes[1] = [indexes[1]] + """Convert keys in detectron pretrained YOLOv7 models to mmyolo style.""" + try: + yolov7_model = 
torch.load(src)['model'].float() + blobs = yolov7_model.state_dict() + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the WongKinYiu/yolov7 repo,' + ' because loading the official pretrained model need' + ' `model.py` to build model.') + state_dict = OrderedDict() + + for key, weight in blobs.items(): + if key.find('anchors') >= 0 or key.find('anchor_grid') >= 0: + continue + + num, module = key.split('.')[1:3] + if int(num) < indexes[0] and int(num) not in indexes[1]: + prefix = f'model.{num}' + new_key = key.replace(prefix, convert_dict[prefix]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + elif int(num) in indexes[1]: + strs_key = key.split('.')[:3] + new_key = key.replace('.'.join(strs_key), + convert_dict['.'.join(strs_key)]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + else: + strs_key = key.split('.')[:4] + new_key = key.replace('.'.join(strs_key), + convert_dict['.'.join(strs_key)]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + + # Add ImplicitA and ImplicitM + for i in range(num_levels): + if num_levels == 3: + implicit_a = f'bbox_head.head_module.' \ + f'convs_pred.{i}.0.implicit' + state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1)) + implicit_m = f'bbox_head.head_module.' \ + f'convs_pred.{i}.2.implicit' + state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1)) + else: + implicit_a = f'bbox_head.head_module.' \ + f'main_convs_pred.{i}.1.implicit' + state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1)) + implicit_m = f'bbox_head.head_module.' \ + f'main_convs_pred.{i}.3.implicit' + state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1)) + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +# Note: This script must be placed under the yolov7 repo to run. +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + 'src', default='yolov7.pt', help='src yolov7 model path') + parser.add_argument('dst', default='mm_yolov7l.pt', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + print('If your model weights are from P6 models, such as W6, E6, D6, \ + E6E, the auxiliary training module is not required to be loaded, \ + so it is normal for the weights of the auxiliary module \ + to be missing.') + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov8_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov8_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..4ed64f2492ba0bece874c482fe704492fad4e8e9 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolov8_to_mmyolo.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
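+# This converter remaps an official ultralytics YOLOv8 checkpoint into mmyolo
+# naming (backbone.*, neck.*, bbox_head.head_module.*) and saves a plain
+# {'state_dict': ...} checkpoint. The mapping table below is written for the
+# YOLOv8-s layout; other sizes presumably share the same module numbering.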
+import argparse +from collections import OrderedDict + +import torch + +convert_dict_s = { + # backbone + 'model.0': 'backbone.stem', + 'model.1': 'backbone.stage1.0', + 'model.2': 'backbone.stage1.1', + 'model.3': 'backbone.stage2.0', + 'model.4': 'backbone.stage2.1', + 'model.5': 'backbone.stage3.0', + 'model.6': 'backbone.stage3.1', + 'model.7': 'backbone.stage4.0', + 'model.8': 'backbone.stage4.1', + 'model.9': 'backbone.stage4.2', + + # neck + 'model.12': 'neck.top_down_layers.0', + 'model.15': 'neck.top_down_layers.1', + 'model.16': 'neck.downsample_layers.0', + 'model.18': 'neck.bottom_up_layers.0', + 'model.19': 'neck.downsample_layers.1', + 'model.21': 'neck.bottom_up_layers.1', + + # Detector + 'model.22': 'bbox_head.head_module', +} + + +def convert(src, dst): + """Convert keys in pretrained YOLOv8 models to mmyolo style.""" + convert_dict = convert_dict_s + + try: + yolov8_model = torch.load(src)['model'] + blobs = yolov8_model.state_dict() + except ModuleNotFoundError: + raise RuntimeError( + 'This script must be placed under the ultralytics repo,' + ' because loading the official pretrained model need' + ' `model.py` to build model.' + 'Also need to install hydra-core>=1.2.0 and thop>=0.1.1') + state_dict = OrderedDict() + + for key, weight in blobs.items(): + num, module = key.split('.')[1:3] + prefix = f'model.{num}' + new_key = key.replace(prefix, convert_dict[prefix]) + + if '.m.' in new_key: + new_key = new_key.replace('.m.', '.blocks.') + new_key = new_key.replace('.cv', '.conv') + elif 'bbox_head.head_module.proto.cv' in new_key: + new_key = new_key.replace( + 'bbox_head.head_module.proto.cv', + 'bbox_head.head_module.proto_preds.conv') + elif 'bbox_head.head_module.proto' in new_key: + new_key = new_key.replace('bbox_head.head_module.proto', + 'bbox_head.head_module.proto_preds') + elif 'bbox_head.head_module.cv4.' in new_key: + new_key = new_key.replace( + 'bbox_head.head_module.cv4', + 'bbox_head.head_module.mask_coeff_preds') + new_key = new_key.replace('.2.weight', '.2.conv.weight') + new_key = new_key.replace('.2.bias', '.2.conv.bias') + elif 'bbox_head.head_module' in new_key: + new_key = new_key.replace('.cv2', '.reg_preds') + new_key = new_key.replace('.cv3', '.cls_preds') + elif 'backbone.stage4.2' in new_key: + new_key = new_key.replace('.cv', '.conv') + else: + new_key = new_key.replace('.cv1', '.main_conv') + new_key = new_key.replace('.cv2', '.final_conv') + + if 'bbox_head.head_module.dfl.conv.weight' == new_key: + print('Drop "bbox_head.head_module.dfl.conv.weight", ' + 'because it is useless') + continue + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +# Note: This script must be placed under the ultralytics repo to run. 
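+# Illustrative examples of the key remapping above (weights are copied
+# unchanged; only checkpoint keys are renamed). Sample keys are hypothetical
+# and only meant to show the mapping style:
+#   'model.0.conv.weight'          -> 'backbone.stem.conv.weight'
+#   'model.22.cv2.0.0.conv.weight' -> 'bbox_head.head_module.reg_preds.0.0.conv.weight'
+# Typical invocation after copying this script next to the ultralytics code
+# (the values mirror the argparse defaults in main() below):
+#   python yolov8_to_mmyolo.py --src yolov8s.pt --dst mmyolov8s.pth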
+def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolov8s.pt', help='src YOLOv8 model path') + parser.add_argument('--dst', default='mmyolov8s.pth', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolox_to_mmyolo.py b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolox_to_mmyolo.py new file mode 100644 index 0000000000000000000000000000000000000000..5fcc7356780444db59517c931ce1a3557ec8340a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/model_converters/yolox_to_mmyolo.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + +neck_dict = { + 'backbone.lateral_conv0': 'neck.reduce_layers.2', + 'backbone.C3_p4.conv': 'neck.top_down_layers.0.0.cv', + 'backbone.C3_p4.m.0.': 'neck.top_down_layers.0.0.m.0.', + 'backbone.reduce_conv1': 'neck.top_down_layers.0.1', + 'backbone.C3_p3.conv': 'neck.top_down_layers.1.cv', + 'backbone.C3_p3.m.0.': 'neck.top_down_layers.1.m.0.', + 'backbone.bu_conv2': 'neck.downsample_layers.0', + 'backbone.C3_n3.conv': 'neck.bottom_up_layers.0.cv', + 'backbone.C3_n3.m.0.': 'neck.bottom_up_layers.0.m.0.', + 'backbone.bu_conv1': 'neck.downsample_layers.1', + 'backbone.C3_n4.conv': 'neck.bottom_up_layers.1.cv', + 'backbone.C3_n4.m.0.': 'neck.bottom_up_layers.1.m.0.', +} + + +def convert_stem(model_key, model_weight, state_dict, converted_names): + new_key = model_key[9:] + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_backbone(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('backbone.dark', 'stage') + num = int(new_key[14]) - 1 + new_key = new_key[:14] + str(num) + new_key[15:] + if '.m.' in model_key: + new_key = new_key.replace('.m.', '.blocks.') + elif not new_key[16] == '0' and 'stage4.1' not in new_key: + new_key = new_key.replace('conv1', 'main_conv') + new_key = new_key.replace('conv2', 'short_conv') + new_key = new_key.replace('conv3', 'final_conv') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_neck(model_key, model_weight, state_dict, converted_names): + for old, new in neck_dict.items(): + if old in model_key: + new_key = model_key.replace(old, new) + if '.m.' 
in model_key: + new_key = new_key.replace('.m.', '.blocks.') + elif '.C' in model_key: + new_key = new_key.replace('cv1', 'main_conv') + new_key = new_key.replace('cv2', 'short_conv') + new_key = new_key.replace('cv3', 'final_conv') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + if 'stem' in model_key: + new_key = model_key.replace('head.stem', 'neck.out_layer') + elif 'cls_convs' in model_key: + new_key = model_key.replace( + 'head.cls_convs', 'bbox_head.head_module.multi_level_cls_convs') + elif 'reg_convs' in model_key: + new_key = model_key.replace( + 'head.reg_convs', 'bbox_head.head_module.multi_level_reg_convs') + elif 'preds' in model_key: + new_key = model_key.replace('head.', + 'bbox_head.head_module.multi_level_conv_') + new_key = new_key.replace('_preds', '') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert(src, dst): + """Convert keys in detectron pretrained YOLOX models to mmyolo style.""" + blobs = torch.load(src)['model'] + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'backbone.stem' in key: + convert_stem(key, weight, state_dict, converted_names) + elif 'backbone.backbone' in key: + convert_backbone(key, weight, state_dict, converted_names) + elif 'backbone.neck' not in key and 'head' not in key: + convert_neck(key, weight, state_dict, converted_names) + elif 'head' in key: + convert_head(key, weight, state_dict, converted_names) + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument( + '--src', default='yolox_s.pth', help='src yolox model path') + parser.add_argument('--dst', default='mmyoloxs.pt', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/slurm_test.sh b/models/YOLO-World/third_party/mmyolo/tools/slurm_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..6dd67e57442b741fc30f26102eb5afe16139edb1 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/models/YOLO-World/third_party/mmyolo/tools/slurm_train.sh b/models/YOLO-World/third_party/mmyolo/tools/slurm_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..b3feb3d9c7a6c33d82739cdf5ee10365673aaded --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} 
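+# Illustrative invocation (partition, job name, config and work dir are
+# placeholders; GPUS and GPUS_PER_NODE default to 8 and can be overridden
+# through the environment):
+#   GPUS=16 GPUS_PER_NODE=8 ./tools/slurm_train.sh my_partition my_job path/to/config.py ./work_dirs/my_job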
+PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/models/YOLO-World/third_party/mmyolo/tools/test.py b/models/YOLO-World/third_party/mmyolo/tools/test.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ac8bde429c946ec18c7f29ea8d7cbad102e262 --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/test.py @@ -0,0 +1,158 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmdet.engine.hooks.utils import trigger_visualization_hook +from mmdet.utils import setup_cache_size_limit_of_dynamo +from mmengine.config import Config, ConfigDict, DictAction +from mmengine.evaluator import DumpResults +from mmengine.runner import Runner + +from mmyolo.registry import RUNNERS +from mmyolo.utils import is_metainfo_lower + + +# TODO: support fuse_conv_bn +def parse_args(): + parser = argparse.ArgumentParser( + description='MMYOLO test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing evaluation metrics') + parser.add_argument( + '--out', + type=str, + help='output result file (must be a .pkl file) in pickle format') + parser.add_argument( + '--json-prefix', + type=str, + help='the prefix of the output json file without perform evaluation, ' + 'which is useful when you want to format the result to a specific ' + 'format and submit it to the test server') + parser.add_argument( + '--tta', + action='store_true', + help='Whether to use test time augmentation') + parser.add_argument( + '--show', action='store_true', help='show prediction results') + parser.add_argument( + '--deploy', + action='store_true', + help='Switch model to deployment mode') + parser.add_argument( + '--show-dir', + help='directory where painted images will be saved. ' + 'If specified, it will be automatically saved ' + 'to the work_dir/timestamp/show_dir') + parser.add_argument( + '--wait-time', type=float, default=2, help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. + parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + # Reduce the number of repeated compilations and improve + # training speed. 
+ setup_cache_size_limit_of_dynamo() + + # load config + cfg = Config.fromfile(args.config) + # replace the ${key} with the value of cfg.key + # cfg = replace_cfg_vals(cfg) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + if args.show or args.show_dir: + cfg = trigger_visualization_hook(cfg, args) + + if args.deploy: + cfg.custom_hooks.append(dict(type='SwitchToDeployHook')) + + # add `format_only` and `outfile_prefix` into cfg + if args.json_prefix is not None: + cfg_json = { + 'test_evaluator.format_only': True, + 'test_evaluator.outfile_prefix': args.json_prefix + } + cfg.merge_from_dict(cfg_json) + + # Determine whether the custom metainfo fields are all lowercase + is_metainfo_lower(cfg) + + if args.tta: + assert 'tta_model' in cfg, 'Cannot find ``tta_model`` in config.' \ + " Can't use tta !" + assert 'tta_pipeline' in cfg, 'Cannot find ``tta_pipeline`` ' \ + "in config. Can't use tta !" + + cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model) + test_data_cfg = cfg.test_dataloader.dataset + while 'dataset' in test_data_cfg: + test_data_cfg = test_data_cfg['dataset'] + + # batch_shapes_cfg will force control the size of the output image, + # it is not compatible with tta. + if 'batch_shapes_cfg' in test_data_cfg: + test_data_cfg.batch_shapes_cfg = None + test_data_cfg.pipeline = cfg.tta_pipeline + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # add `DumpResults` dummy metric + if args.out is not None: + assert args.out.endswith(('.pkl', '.pickle')), \ + 'The dump file must be a pkl file.' + runner.test_evaluator.metrics.append( + DumpResults(out_file_path=args.out)) + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/third_party/mmyolo/tools/train.py b/models/YOLO-World/third_party/mmyolo/tools/train.py new file mode 100644 index 0000000000000000000000000000000000000000..61f94980d2236295c4ca317520842a53b1813f0a --- /dev/null +++ b/models/YOLO-World/third_party/mmyolo/tools/train.py @@ -0,0 +1,123 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
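+# Illustrative usage (the config path is a placeholder):
+#   python tools/train.py path/to/config.py --amp --work-dir ./work_dirs/exp
+#   python tools/train.py path/to/config.py --resume   # auto-resume from the latest checkpoint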
+import argparse +import logging +import os +import os.path as osp + +from mmdet.utils import setup_cache_size_limit_of_dynamo +from mmengine.config import Config, DictAction +from mmengine.logging import print_log +from mmengine.runner import Runner + +from mmyolo.registry import RUNNERS +from mmyolo.utils import is_metainfo_lower + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--amp', + action='store_true', + default=False, + help='enable automatic-mixed-precision training') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpoint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. + parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + # Reduce the number of repeated compilations and improve + # training speed. 
+ setup_cache_size_limit_of_dynamo() + + # load config + cfg = Config.fromfile(args.config) + # replace the ${key} with the value of cfg.key + # cfg = replace_cfg_vals(cfg) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.type + if optim_wrapper == 'AmpOptimWrapper': + print_log( + 'AMP training is already enabled in your config.', + logger='current', + level=logging.WARNING) + else: + assert optim_wrapper == 'OptimWrapper', ( + '`--amp` is only supported when the optimizer wrapper type is ' + f'`OptimWrapper` but got {optim_wrapper}.') + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.loss_scale = 'dynamic' + + # resume is determined in this priority: resume from > auto_resume + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # Determine whether the custom metainfo fields are all lowercase + is_metainfo_lower(cfg) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/tools/dist_test.sh b/models/YOLO-World/tools/dist_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..dea131b43ea8f1222661d20603d40c18ea7f28a1 --- /dev/null +++ b/models/YOLO-World/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/models/YOLO-World/tools/dist_train.sh b/models/YOLO-World/tools/dist_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..ea56f698d1f00b992eec8d481c75b273d202acf5 --- /dev/null +++ b/models/YOLO-World/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${MASTER_PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/models/YOLO-World/tools/generate_image_prompts.py b/models/YOLO-World/tools/generate_image_prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..ba0d0236c282a3df3b244874899a0370a124fa29 --- /dev/null 
+++ b/models/YOLO-World/tools/generate_image_prompts.py @@ -0,0 +1,59 @@ +import os +import tqdm +import argparse +import os.path as osp +import numpy as np +from PIL import Image +from transformers import (AutoTokenizer, AutoProcessor, + CLIPVisionModelWithProjection, + CLIPTextModelWithProjection) + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument( + '--model', + type=str, + default='../pretrained_models/open-ai-clip-vit-base-patch32') + parser.add_argument('--image-dir', type=str, default='data/samples.txt') + parser.add_argument('--out-dir', type=str, default='') + parser.add_argument('--out-file', type=str) + + args = parser.parse_args() + + tokenizer = AutoTokenizer.from_pretrained(args.model) + vision_model = CLIPVisionModelWithProjection.from_pretrained(args.model) + text_model = CLIPTextModelWithProjection.from_pretrained(args.model) + processor = AutoProcessor.from_pretrained(args.model) + + # padding prompts + device = 'cuda:0' + text_model.to(device) + texts = tokenizer(text=[' '], return_tensors='pt', padding=True) + texts = texts.to(device) + text_outputs = text_model(**texts) + txt_feats = text_outputs.text_embeds + txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True) + txt_feats = txt_feats.reshape(-1, txt_feats.shape[-1]).cpu().data.numpy() + + images = os.listdir(args.image_dir) + category_embeds = [] + + def _forward_vision_model(image_name): + image_path = osp.join(args.image_dir, image_name) + # category = image_name.split('-')[1] + image = Image.open(image_path).convert("RGB") + inputs = processor(images=image, return_tensors="pt", padding=True) + image_outputs = vision_model(**inputs) + img_feats = image_outputs.image_embeds + # img_feats + img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True) + img_feats = img_feats.reshape( + -1, img_feats.shape[-1])[0].cpu().data.numpy() + category_embeds.append(img_feats) + + for image_ in tqdm.tqdm(images): + _forward_vision_model(image_) + category_embeds.append(txt_feats) + category_embeds = np.stack(category_embeds) + np.save(osp.join(args.out_dir, args.out_file), category_embeds) diff --git a/models/YOLO-World/tools/generate_text_prompts.py b/models/YOLO-World/tools/generate_text_prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..7dc00aad87a310da0d8b5774b1af8df2176f2015 --- /dev/null +++ b/models/YOLO-World/tools/generate_text_prompts.py @@ -0,0 +1,36 @@ +import json +import argparse +import numpy as np +from transformers import (AutoTokenizer, CLIPTextModelWithProjection) + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument( + '--model', + type=str, + default='./pretrained_models/clip-vit-base-patch32-projection') + parser.add_argument('--text', + type=str, + default='data/captions/coco_class_captions.json') + parser.add_argument('--out', type=str, default='output.npy') + + args = parser.parse_args() + + tokenizer = AutoTokenizer.from_pretrained(args.model) + model = CLIPTextModelWithProjection.from_pretrained(args.model) + + with open(args.text) as f: + data = json.load(f) + texts = [x[0] for x in data] + device = 'cuda:0' + model.to(device) + texts = tokenizer(text=texts, return_tensors='pt', padding=True) + texts = texts.to(device) + text_outputs = model(**texts) + txt_feats = text_outputs.text_embeds + txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True) + txt_feats = txt_feats.reshape(-1, txt_feats.shape[-1]) + + np.save(args.out, txt_feats.cpu().data.numpy()) diff --git 
a/models/YOLO-World/tools/reparameterize_yoloworld.py b/models/YOLO-World/tools/reparameterize_yoloworld.py new file mode 100644 index 0000000000000000000000000000000000000000..0257f637dc342bcec0cfe1e1f053edced7bbb0af --- /dev/null +++ b/models/YOLO-World/tools/reparameterize_yoloworld.py @@ -0,0 +1,139 @@ +import os +import argparse + +import torch +import numpy as np + + +def parse_args(): + + parser = argparse.ArgumentParser("Reparameterize YOLO-World") + parser.add_argument('--model', help='model checkpoints to reparameterize') + parser.add_argument('--out-dir', help='output checkpoints') + parser.add_argument( + '--text-embed', + help='text embeddings to reparameterized into YOLO-World') + parser.add_argument('--conv-neck', + action='store_true', + help='whether using 1x1 conv in RepVL-PAN') + + args = parser.parse_args() + return args + + +def convert_head(scale, bias, text_embed): + N, D = text_embed.shape + weight = (text_embed * scale.exp()).view(N, D, 1, 1) + bias = torch.ones(N) * bias + return weight, bias + + +def reparameterize_head(state_dict, embeds): + + cls_layers = [ + 'bbox_head.head_module.cls_contrasts.0', + 'bbox_head.head_module.cls_contrasts.1', + 'bbox_head.head_module.cls_contrasts.2' + ] + + for i in range(3): + scale = state_dict[cls_layers[i] + '.logit_scale'] + bias = state_dict[cls_layers[i] + '.bias'] + weight, bias = convert_head(scale, bias, embeds) + state_dict[cls_layers[i] + '.conv.weight'] = weight + state_dict[cls_layers[i] + '.conv.bias'] = bias + del state_dict[cls_layers[i] + '.bias'] + del state_dict[cls_layers[i] + '.logit_scale'] + return state_dict + + +def convert_neck_split_conv(input_state_dict, block_name, text_embeds, + num_heads): + if block_name + '.guide_fc.weight' not in input_state_dict: + return input_state_dict + guide_fc_weight = input_state_dict[block_name + '.guide_fc.weight'] + guide_fc_bias = input_state_dict[block_name + '.guide_fc.bias'] + guide = text_embeds @ guide_fc_weight.transpose(0, + 1) + guide_fc_bias[None, :] + N, D = guide.shape + guide = list(guide.split(D // num_heads, dim=1)) + del input_state_dict[block_name + '.guide_fc.weight'] + del input_state_dict[block_name + '.guide_fc.bias'] + for i in range(num_heads): + input_state_dict[block_name + + f'.guide_convs.{i}.weight'] = guide[i][:, :, None, + None] + return input_state_dict + + +def convert_neck_weight(input_state_dict, block_name, embeds, num_heads): + guide_fc_weight = input_state_dict[block_name + '.guide_fc.weight'] + guide_fc_bias = input_state_dict[block_name + '.guide_fc.bias'] + guide = embeds @ guide_fc_weight.transpose(0, 1) + guide_fc_bias[None, :] + N, D = guide.shape + del input_state_dict[block_name + '.guide_fc.weight'] + del input_state_dict[block_name + '.guide_fc.bias'] + input_state_dict[block_name + '.guide_weight'] = guide.view( + N, D // num_heads, num_heads) + return input_state_dict + + +def reparameterize_neck(state_dict, embeds, type='conv'): + + neck_blocks = [ + 'neck.top_down_layers.0.attn_block', + 'neck.top_down_layers.1.attn_block', + 'neck.bottom_up_layers.0.attn_block', + 'neck.bottom_up_layers.1.attn_block' + ] + if "neck.top_down_layers.0.attn_block.bias" not in state_dict: + return state_dict + for block in neck_blocks: + num_heads = state_dict[block + '.bias'].shape[0] + if type == 'conv': + convert_neck_split_conv(state_dict, block, embeds, num_heads) + else: + convert_neck_weight(state_dict, block, embeds, num_heads) + return state_dict + + +def main(): + + args = parse_args() + + # load checkpoint + model = 
torch.load(args.model, map_location='cpu') + state_dict = model['state_dict'] + + # load embeddings + embeddings = torch.from_numpy(np.load(args.text_embed)) + + # remove text encoder + keys = list(state_dict.keys()) + keys = [x for x in keys if "text_model" not in x] + + state_dict_wo_text = {x: state_dict[x] for x in keys} + print("removing text encoder") + + state_dict_wo_text = reparameterize_head(state_dict_wo_text, embeddings) + print("reparameterizing head") + + if args.conv_neck: + neck_type = "conv" + else: + neck_type = "linear" + + state_dict_wo_text = reparameterize_neck(state_dict_wo_text, embeddings, + neck_type) + + print("reparameterizing neck") + + model['state_dict'] = state_dict_wo_text + + model_name = os.path.basename(args.model) + model_name = model_name.replace('.pth', f'_rep_{neck_type}.pth') + torch.save(model, os.path.join(args.out_dir, model_name)) + + +if __name__ == "__main__": + main() diff --git a/models/YOLO-World/tools/test.py b/models/YOLO-World/tools/test.py new file mode 100644 index 0000000000000000000000000000000000000000..c05defe3c70a4cf4b8775a98bb89a84b7faba63a --- /dev/null +++ b/models/YOLO-World/tools/test.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmdet.engine.hooks.utils import trigger_visualization_hook +from mmengine.config import Config, ConfigDict, DictAction +from mmengine.evaluator import DumpResults +from mmengine.runner import Runner + +from mmyolo.registry import RUNNERS +from mmyolo.utils import is_metainfo_lower + + +# TODO: support fuse_conv_bn +def parse_args(): + parser = argparse.ArgumentParser( + description='MMYOLO test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing evaluation metrics') + parser.add_argument( + '--out', + type=str, + help='output result file (must be a .pkl file) in pickle format') + parser.add_argument( + '--json-prefix', + type=str, + help='the prefix of the output json file without perform evaluation, ' + 'which is useful when you want to format the result to a specific ' + 'format and submit it to the test server') + parser.add_argument( + '--tta', + action='store_true', + help='Whether to use test time augmentation') + parser.add_argument( + '--show', action='store_true', help='show prediction results') + parser.add_argument( + '--deploy', + action='store_true', + help='Switch model to deployment mode') + parser.add_argument( + '--show-dir', + help='directory where painted images will be saved. ' + 'If specified, it will be automatically saved ' + 'to the work_dir/timestamp/show_dir') + parser.add_argument( + '--wait-time', type=float, default=2, help='the interval of show (s)') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + # replace the ${key} with the value of cfg.key + # cfg = replace_cfg_vals(cfg) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + if args.show or args.show_dir: + cfg = trigger_visualization_hook(cfg, args) + + if args.deploy: + cfg.custom_hooks.append(dict(type='SwitchToDeployHook')) + + # add `format_only` and `outfile_prefix` into cfg + if args.json_prefix is not None: + cfg_json = { + 'test_evaluator.format_only': True, + 'test_evaluator.outfile_prefix': args.json_prefix + } + cfg.merge_from_dict(cfg_json) + + # Determine whether the custom metainfo fields are all lowercase + is_metainfo_lower(cfg) + + if args.tta: + assert 'tta_model' in cfg, 'Cannot find ``tta_model`` in config.' \ + " Can't use tta !" + assert 'tta_pipeline' in cfg, 'Cannot find ``tta_pipeline`` ' \ + "in config. Can't use tta !" + + cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model) + test_data_cfg = cfg.test_dataloader.dataset + while 'dataset' in test_data_cfg: + test_data_cfg = test_data_cfg['dataset'] + + # batch_shapes_cfg will force control the size of the output image, + # it is not compatible with tta. + if 'batch_shapes_cfg' in test_data_cfg: + test_data_cfg.batch_shapes_cfg = None + test_data_cfg.pipeline = cfg.tta_pipeline + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # add `DumpResults` dummy metric + if args.out is not None: + assert args.out.endswith(('.pkl', '.pickle')), \ + 'The dump file must be a pkl file.' + runner.test_evaluator.metrics.append( + DumpResults(out_file_path=args.out)) + + # start testing + runner.test() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/tools/train.py b/models/YOLO-World/tools/train.py new file mode 100644 index 0000000000000000000000000000000000000000..f634972af714badd6c501218e4774df58275d0d1 --- /dev/null +++ b/models/YOLO-World/tools/train.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import logging +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.logging import print_log +from mmengine.runner import Runner + +from mmyolo.registry import RUNNERS +from mmyolo.utils import is_metainfo_lower + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--amp', + action='store_true', + default=False, + help='enable automatic-mixed-precision training') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpoint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + # replace the ${key} with the value of cfg.key + # cfg = replace_cfg_vals(cfg) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + if args.config.startswith('projects/'): + config = args.config[len('projects/'):] + config = config.replace('/configs/', '/') + cfg.work_dir = osp.join('./work_dirs', osp.splitext(config)[0]) + else: + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.type + if optim_wrapper == 'AmpOptimWrapper': + print_log( + 'AMP training is already enabled in your config.', + logger='current', + level=logging.WARNING) + else: + assert optim_wrapper == 'OptimWrapper', ( + '`--amp` is only supported when the optimizer wrapper type is ' + f'`OptimWrapper` but got {optim_wrapper}.') + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.loss_scale = 'dynamic' + + # resume is determined in this priority: resume from > auto_resume + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # Determine whether the custom metainfo fields are all lowercase + is_metainfo_lower(cfg) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 
'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/models/YOLO-World/yolo_world/__init__.py b/models/YOLO-World/yolo_world/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ce4f9f061c4ac63b9020cb5ddaaf8e2c8930315 --- /dev/null +++ b/models/YOLO-World/yolo_world/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import importlib.metadata as importlib_metadata + +try: + __version__ = importlib_metadata.version(__package__ or __name__) +except importlib_metadata.PackageNotFoundError: + __version__ = '0.0.0' + + +from .models import * # noqa +from .datasets import * # noqa +from .engine import * # noqa diff --git a/models/YOLO-World/yolo_world/datasets/__init__.py b/models/YOLO-World/yolo_world/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3fbdad0ca10bca182c7323295d898afc03bd3913 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .mm_dataset import ( + MultiModalDataset, MultiModalMixedDataset) +from .yolov5_obj365v1 import YOLOv5Objects365V1Dataset +from .yolov5_obj365v2 import YOLOv5Objects365V2Dataset +from .yolov5_mixed_grounding import YOLOv5MixedGroundingDataset +from .utils import yolow_collate +from .transformers import * # NOQA +from .yolov5_v3det import YOLOv5V3DetDataset +from .yolov5_lvis import YOLOv5LVISV1Dataset + +__all__ = [ + 'MultiModalDataset', 'YOLOv5Objects365V1Dataset', + 'YOLOv5Objects365V2Dataset', 'YOLOv5MixedGroundingDataset', + 'YOLOv5V3DetDataset', 'yolow_collate', + 'YOLOv5LVISV1Dataset', 'MultiModalMixedDataset', +] diff --git a/models/YOLO-World/yolo_world/datasets/mm_dataset.py b/models/YOLO-World/yolo_world/datasets/mm_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..510e8b935fc85a570247b92b2459eaf160632199 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/mm_dataset.py @@ -0,0 +1,122 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
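+# Sketch of how this wrapper is typically configured (the dataset type and
+# file paths below are placeholders, not files shipped with this module):
+#   train_dataset = dict(
+#       type='MultiModalDataset',
+#       dataset=dict(type='YOLOv5LVISV1Dataset', ...),
+#       class_text_path='data/texts/class_texts.json',  # JSON list of per-class text prompts
+#       pipeline=train_pipeline)
+# The wrapped dataset provides the images and boxes; MultiModalDataset attaches
+# the class texts to every sample under the 'texts' key (see get_data_info).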
+import copy +import json +import logging +from typing import Callable, List, Union + +from mmengine.logging import print_log +from mmengine.dataset.base_dataset import ( + BaseDataset, Compose, force_full_init) +from mmyolo.registry import DATASETS + + +@DATASETS.register_module() +class MultiModalDataset: + """Multi-modal dataset.""" + + def __init__(self, + dataset: Union[BaseDataset, dict], + class_text_path: str = None, + test_mode: bool = True, + pipeline: List[Union[dict, Callable]] = [], + lazy_init: bool = False) -> None: + self.dataset: BaseDataset + if isinstance(dataset, dict): + self.dataset = DATASETS.build(dataset) + elif isinstance(dataset, BaseDataset): + self.dataset = dataset + else: + raise TypeError( + 'dataset must be a dict or a BaseDataset, ' + f'but got {dataset}') + + if class_text_path is not None: + self.class_texts = json.load(open(class_text_path, 'r')) + # ori_classes = self.dataset.metainfo['classes'] + # assert len(ori_classes) == len(self.class_texts), \ + # ('The number of classes in the dataset and the class text' + # 'file must be the same.') + else: + self.class_texts = None + + self.test_mode = test_mode + self._metainfo = self.dataset.metainfo + self.pipeline = Compose(pipeline) + + self._fully_initialized = False + if not lazy_init: + self.full_init() + + @property + def metainfo(self) -> dict: + return copy.deepcopy(self._metainfo) + + def full_init(self) -> None: + """``full_init`` dataset.""" + if self._fully_initialized: + return + + self.dataset.full_init() + self._ori_len = len(self.dataset) + self._fully_initialized = True + + @force_full_init + def get_data_info(self, idx: int) -> dict: + """Get annotation by index.""" + data_info = self.dataset.get_data_info(idx) + if self.class_texts is not None: + data_info.update({'texts': self.class_texts}) + return data_info + + def __getitem__(self, idx): + if not self._fully_initialized: + print_log( + 'Please call `full_init` method manually to ' + 'accelerate the speed.', + logger='current', + level=logging.WARNING) + self.full_init() + + data_info = self.get_data_info(idx) + + if hasattr(self.dataset, 'test_mode') and not self.dataset.test_mode: + data_info['dataset'] = self + elif not self.test_mode: + data_info['dataset'] = self + return self.pipeline(data_info) + + @force_full_init + def __len__(self) -> int: + return self._ori_len + + +@DATASETS.register_module() +class MultiModalMixedDataset(MultiModalDataset): + """Multi-modal Mixed dataset. 
+ mix "detection dataset" and "caption dataset" + Args: + dataset_type (str): dataset type, 'detection' or 'caption' + """ + def __init__(self, + dataset: Union[BaseDataset, dict], + class_text_path: str = None, + dataset_type: str = 'detection', + test_mode: bool = True, + pipeline: List[Union[dict, Callable]] = [], + lazy_init: bool = False) -> None: + self.dataset_type = dataset_type + super().__init__(dataset, + class_text_path, + test_mode, + pipeline, + lazy_init) + + @force_full_init + def get_data_info(self, idx: int) -> dict: + """Get annotation by index.""" + data_info = self.dataset.get_data_info(idx) + if self.class_texts is not None: + data_info.update({'texts': self.class_texts}) + data_info['is_detection'] = 1 \ + if self.dataset_type == 'detection' else 0 + return data_info diff --git a/models/YOLO-World/yolo_world/datasets/transformers/__init__.py b/models/YOLO-World/yolo_world/datasets/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..495e981551f7ae51761a97e4e41e141c43fbc536 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/transformers/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .mm_transforms import RandomLoadText, LoadText +from .mm_mix_img_transforms import ( + MultiModalMosaic, MultiModalMosaic9, YOLOv5MultiModalMixUp, + YOLOXMultiModalMixUp) + +__all__ = ['RandomLoadText', 'LoadText', 'MultiModalMosaic', + 'MultiModalMosaic9', 'YOLOv5MultiModalMixUp', + 'YOLOXMultiModalMixUp'] diff --git a/models/YOLO-World/yolo_world/datasets/transformers/mm_mix_img_transforms.py b/models/YOLO-World/yolo_world/datasets/transformers/mm_mix_img_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..0f4dfe084713a16438d13376ff36fd9265022a4e --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/transformers/mm_mix_img_transforms.py @@ -0,0 +1,1173 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import collections +import copy +from abc import ABCMeta, abstractmethod +from typing import Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +from mmcv.transforms import BaseTransform +from mmdet.structures.bbox import autocast_box_type +from mmengine.dataset import BaseDataset +from mmengine.dataset.base_dataset import Compose +from numpy import random +from mmyolo.registry import TRANSFORMS + + +class BaseMultiModalMixImageTransform(BaseTransform, metaclass=ABCMeta): + """A Base Transform of Multimodal multiple images mixed. + + Suitable for training on multiple images mixed data augmentation like + mosaic and mixup. + + Cached mosaic transform will random select images from the cache + and combine them into one output image if use_cached is True. + + Args: + pre_transform(Sequence[str]): Sequence of transform object or + config dict to be composed. Defaults to None. + prob(float): The transformation probability. Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 40. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. 
If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + pre_transform: Optional[Sequence[str]] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 40, + random_pop: bool = True, + max_refetch: int = 15): + + self.max_refetch = max_refetch + self.prob = prob + + self.use_cached = use_cached + self.max_cached_images = max_cached_images + self.random_pop = random_pop + self.results_cache = [] + + if pre_transform is None: + self.pre_transform = None + else: + self.pre_transform = Compose(pre_transform) + + @abstractmethod + def get_indexes(self, dataset: Union[BaseDataset, + list]) -> Union[list, int]: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list or int: indexes. + """ + pass + + @abstractmethod + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + pass + + def _update_label_text(self, results: dict) -> dict: + """Update label text.""" + if 'texts' not in results: + return results + + mix_texts = sum( + [results['texts']] + + [x['texts'] for x in results['mix_results']], []) + mix_texts = list({tuple(x) for x in mix_texts}) + text2id = {text: i for i, text in enumerate(mix_texts)} + + for res in [results] + results['mix_results']: + for i, label in enumerate(res['gt_bboxes_labels']): + text = res['texts'][label] + updated_id = text2id[tuple(text)] + res['gt_bboxes_labels'][i] = updated_id + res['texts'] = mix_texts + return results + + @autocast_box_type() + def transform(self, results: dict) -> dict: + """Data augmentation function. + + The transform steps are as follows: + 1. Randomly generate index list of other images. + 2. Before Mosaic or MixUp need to go through the necessary + pre_transform, such as MixUp' pre_transform pipeline + include: 'LoadImageFromFile','LoadAnnotations', + 'Mosaic' and 'RandomAffine'. + 3. Use mix_img_transform function to implement specific + mix operations. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + + if random.uniform(0, 1) > self.prob: + return results + + if self.use_cached: + # Be careful: deep copying can be very time-consuming + # if results includes dataset. + dataset = results.pop('dataset', None) + self.results_cache.append(copy.deepcopy(results)) + if len(self.results_cache) > self.max_cached_images: + if self.random_pop: + index = random.randint(0, len(self.results_cache) - 1) + else: + index = 0 + self.results_cache.pop(index) + + if len(self.results_cache) <= 4: + return results + else: + assert 'dataset' in results + # Be careful: deep copying can be very time-consuming + # if results includes dataset. 
+ dataset = results.pop('dataset', None) + + for _ in range(self.max_refetch): + # get index of one or three other images + if self.use_cached: + indexes = self.get_indexes(self.results_cache) + else: + indexes = self.get_indexes(dataset) + + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + + if self.use_cached: + mix_results = [ + copy.deepcopy(self.results_cache[i]) for i in indexes + ] + else: + # get images information will be used for Mosaic or MixUp + mix_results = [ + copy.deepcopy(dataset.get_data_info(index)) + for index in indexes + ] + + if self.pre_transform is not None: + for i, data in enumerate(mix_results): + # pre_transform may also require dataset + data.update({'dataset': dataset}) + # before Mosaic or MixUp need to go through + # the necessary pre_transform + _results = self.pre_transform(data) + _results.pop('dataset') + mix_results[i] = _results + + if None not in mix_results: + results['mix_results'] = mix_results + break + print('Repeated calculation') + else: + raise RuntimeError( + 'The loading pipeline of the original dataset' + ' always return None. Please check the correctness ' + 'of the dataset and its pipeline.') + + # update labels and texts + results = self._update_label_text(results) + + # Mosaic or MixUp + results = self.mix_img_transform(results) + + if 'mix_results' in results: + results.pop('mix_results') + results['dataset'] = dataset + + return results + + +@TRANSFORMS.register_module() +class MultiModalMosaic(BaseMultiModalMixImageTransform): + """Mosaic augmentation. + + Given 4 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + mosaic transform + center_x + +------------------------------+ + | pad | | + | +-----------+ pad | + | | | | + | | image1 +-----------+ + | | | | + | | | image2 | + center_y |----+-+-----------+-----------+ + | | cropped | | + |pad | image3 | image4 | + | | | | + +----|-------------+-----------+ + | | + +-------------+ + + The mosaic transform steps are as follows: + + 1. Choose the mosaic center as the intersections of 4 images + 2. Get the left top image according to the index, and randomly + sample another 3 images from the custom dataset. + 3. Sub image will be cropped if image is larger than mosaic patch + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (width, height). + Defaults to (640, 640). + center_ratio_range (Sequence[float]): Center ratio range of mosaic + output. Defaults to (0.5, 1.5). + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + pad_val (int): Pad value. Defaults to 114. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. 
The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 40. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + center_ratio_range: Tuple[float, float] = (0.5, 1.5), + bbox_clip_border: bool = True, + pad_val: float = 114.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 40, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ + f'got {prob}.' + if use_cached: + assert max_cached_images >= 4, 'The length of cache must >= 4, ' \ + f'but got {max_cached_images}.' + + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + + self.img_scale = img_scale + self.center_ratio_range = center_ratio_range + self.bbox_clip_border = bbox_clip_border + self.pad_val = pad_val + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> list: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list: indexes. + """ + indexes = [random.randint(0, len(dataset)) for _ in range(3)] + return indexes + + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. 
+ """ + # print("use mosaic") + assert 'mix_results' in results + mosaic_bboxes = [] + mosaic_bboxes_labels = [] + mosaic_ignore_flags = [] + mosaic_masks = [] + with_mask = True if 'gt_masks' in results else False + # print("with_mask: ", with_mask) + # self.img_scale is wh format + img_scale_w, img_scale_h = self.img_scale + + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(img_scale_h * 2), int(img_scale_w * 2), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full((int(img_scale_h * 2), int(img_scale_w * 2)), + self.pad_val, + dtype=results['img'].dtype) + + # mosaic center x, y + center_x = int(random.uniform(*self.center_ratio_range) * img_scale_w) + center_y = int(random.uniform(*self.center_ratio_range) * img_scale_h) + center_position = (center_x, center_y) + + loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') + for i, loc in enumerate(loc_strs): + if loc == 'top_left': + results_patch = results + else: + results_patch = results['mix_results'][i - 1] + + img_i = results_patch['img'] + h_i, w_i = img_i.shape[:2] + # keep_ratio resize + scale_ratio_i = min(img_scale_h / h_i, img_scale_w / w_i) + img_i = mmcv.imresize( + img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) + + # compute the combine parameters + paste_coord, crop_coord = self._mosaic_combine( + loc, center_position, img_i.shape[:2][::-1]) + x1_p, y1_p, x2_p, y2_p = paste_coord + x1_c, y1_c, x2_c, y2_c = crop_coord + + # crop and paste image + mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] + + # adjust coordinate + gt_bboxes_i = results_patch['gt_bboxes'] + gt_bboxes_labels_i = results_patch['gt_bboxes_labels'] + gt_ignore_flags_i = results_patch['gt_ignore_flags'] + + padw = x1_p - x1_c + padh = y1_p - y1_c + gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i]) + gt_bboxes_i.translate_([padw, padh]) + mosaic_bboxes.append(gt_bboxes_i) + mosaic_bboxes_labels.append(gt_bboxes_labels_i) + mosaic_ignore_flags.append(gt_ignore_flags_i) + if with_mask and results_patch.get('gt_masks', None) is not None: + gt_masks_i = results_patch['gt_masks'] + gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i)) + gt_masks_i = gt_masks_i.translate( + out_shape=(int(self.img_scale[0] * 2), + int(self.img_scale[1] * 2)), + offset=padw, + direction='horizontal') + gt_masks_i = gt_masks_i.translate( + out_shape=(int(self.img_scale[0] * 2), + int(self.img_scale[1] * 2)), + offset=padh, + direction='vertical') + mosaic_masks.append(gt_masks_i) + + mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0) + mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0) + mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0) + + if self.bbox_clip_border: + mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w]) + if with_mask: + mosaic_masks = mosaic_masks[0].cat(mosaic_masks) + results['gt_masks'] = mosaic_masks + else: + # remove outside bboxes + inside_inds = mosaic_bboxes.is_inside( + [2 * img_scale_h, 2 * img_scale_w]).numpy() + mosaic_bboxes = mosaic_bboxes[inside_inds] + mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds] + mosaic_ignore_flags = mosaic_ignore_flags[inside_inds] + if with_mask: + mosaic_masks = mosaic_masks[0].cat(mosaic_masks)[inside_inds] + results['gt_masks'] = mosaic_masks + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['gt_bboxes'] = mosaic_bboxes + results['gt_bboxes_labels'] = mosaic_bboxes_labels + results['gt_ignore_flags'] = mosaic_ignore_flags + + return results + + def 
_mosaic_combine( + self, loc: str, center_position_xy: Sequence[float], + img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]: + """Calculate global coordinate of mosaic image and local coordinate of + cropped sub-image. + + Args: + loc (str): Index for the sub-image, loc in ('top_left', + 'top_right', 'bottom_left', 'bottom_right'). + center_position_xy (Sequence[float]): Mixing center for 4 images, + (x, y). + img_shape_wh (Sequence[int]): Width and height of sub-image + + Returns: + tuple[tuple[float]]: Corresponding coordinate of pasting and + cropping + - paste_coord (tuple): paste corner coordinate in mosaic image. + - crop_coord (tuple): crop corner coordinate in mosaic image. + """ + assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') + if loc == 'top_left': + # index0 to top left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + center_position_xy[0], \ + center_position_xy[1] + crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( + y2 - y1), img_shape_wh[0], img_shape_wh[1] + + elif loc == 'top_right': + # index1 to top right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + max(center_position_xy[1] - img_shape_wh[1], 0), \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[0] * 2), \ + center_position_xy[1] + crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( + img_shape_wh[0], x2 - x1), img_shape_wh[1] + + elif loc == 'bottom_left': + # index2 to bottom left part of image + x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ + center_position_xy[1], \ + center_position_xy[0], \ + min(self.img_scale[1] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( + y2 - y1, img_shape_wh[1]) + + else: + # index3 to bottom right part of image + x1, y1, x2, y2 = center_position_xy[0], \ + center_position_xy[1], \ + min(center_position_xy[0] + img_shape_wh[0], + self.img_scale[0] * 2), \ + min(self.img_scale[1] * 2, center_position_xy[1] + + img_shape_wh[1]) + crop_coord = 0, 0, min(img_shape_wh[0], + x2 - x1), min(y2 - y1, img_shape_wh[1]) + + paste_coord = x1, y1, x2, y2 + return paste_coord, crop_coord + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'center_ratio_range={self.center_ratio_range}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class MultiModalMosaic9(BaseMultiModalMixImageTransform): + """Mosaic9 augmentation. + + Given 9 images, mosaic transform combines them into + one output image. The output image is composed of the parts from each sub- + image. + + .. code:: text + + +-------------------------------+------------+ + | pad | pad | | + | +----------+ | | + | | +---------------+ top_right | + | | | top | image2 | + | | top_left | image1 | | + | | image8 o--------+------+--------+---+ + | | | | | | + +----+----------+ | right |pad| + | | center | image3 | | + | left | image0 +---------------+---| + | image7 | | | | + +---+-----------+---+--------+ | | + | | cropped | | bottom_right |pad| + | |bottom_left| | image4 | | + | | image6 | bottom | | | + +---|-----------+ image5 +---------------+---| + | pad | | pad | + +-----------+------------+-------------------+ + + The mosaic transform steps are as follows: + + 1. 
Get the center image according to the index, and randomly + sample another 8 images from the custom dataset. + 2. Randomly offset the image after Mosaic + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + Args: + img_scale (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (width, height). + Defaults to (640, 640). + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + pad_val (int): Pad value. Defaults to 114. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 5 caches for each image suffices for + randomness. Defaults to 50. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of retry iterations for getting + valid results from the pipeline. If the number of iterations is + greater than `max_refetch`, but results is still None, then the + iteration is terminated and raise the error. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + bbox_clip_border: bool = True, + pad_val: Union[float, int] = 114.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 50, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ + f'got {prob}.' + if use_cached: + assert max_cached_images >= 9, 'The length of cache must >= 9, ' \ + f'but got {max_cached_images}.' + + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + + self.img_scale = img_scale + self.bbox_clip_border = bbox_clip_border + self.pad_val = pad_val + + # intermediate variables + self._current_img_shape = [0, 0] + self._center_img_shape = [0, 0] + self._previous_img_shape = [0, 0] + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> list: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + list: indexes. + """ + indexes = [random.randint(0, len(dataset)) for _ in range(8)] + return indexes + + def mix_img_transform(self, results: dict) -> dict: + """Mixed image data transformation. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. 
+ """ + assert 'mix_results' in results + + mosaic_bboxes = [] + mosaic_bboxes_labels = [] + mosaic_ignore_flags = [] + + img_scale_w, img_scale_h = self.img_scale + + if len(results['img'].shape) == 3: + mosaic_img = np.full( + (int(img_scale_h * 3), int(img_scale_w * 3), 3), + self.pad_val, + dtype=results['img'].dtype) + else: + mosaic_img = np.full((int(img_scale_h * 3), int(img_scale_w * 3)), + self.pad_val, + dtype=results['img'].dtype) + + # index = 0 is mean original image + # len(results['mix_results']) = 8 + loc_strs = ('center', 'top', 'top_right', 'right', 'bottom_right', + 'bottom', 'bottom_left', 'left', 'top_left') + + results_all = [results, *results['mix_results']] + for index, results_patch in enumerate(results_all): + img_i = results_patch['img'] + # keep_ratio resize + img_i_h, img_i_w = img_i.shape[:2] + scale_ratio_i = min(img_scale_h / img_i_h, img_scale_w / img_i_w) + img_i = mmcv.imresize( + img_i, + (int(img_i_w * scale_ratio_i), int(img_i_h * scale_ratio_i))) + + paste_coord = self._mosaic_combine(loc_strs[index], + img_i.shape[:2]) + + padw, padh = paste_coord[:2] + x1, y1, x2, y2 = (max(x, 0) for x in paste_coord) + mosaic_img[y1:y2, x1:x2] = img_i[y1 - padh:, x1 - padw:] + + gt_bboxes_i = results_patch['gt_bboxes'] + gt_bboxes_labels_i = results_patch['gt_bboxes_labels'] + gt_ignore_flags_i = results_patch['gt_ignore_flags'] + gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i]) + gt_bboxes_i.translate_([padw, padh]) + + mosaic_bboxes.append(gt_bboxes_i) + mosaic_bboxes_labels.append(gt_bboxes_labels_i) + mosaic_ignore_flags.append(gt_ignore_flags_i) + + # Offset + offset_x = int(random.uniform(0, img_scale_w)) + offset_y = int(random.uniform(0, img_scale_h)) + mosaic_img = mosaic_img[offset_y:offset_y + 2 * img_scale_h, + offset_x:offset_x + 2 * img_scale_w] + + mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0) + mosaic_bboxes.translate_([-offset_x, -offset_y]) + mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0) + mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0) + + if self.bbox_clip_border: + mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w]) + else: + # remove outside bboxes + inside_inds = mosaic_bboxes.is_inside( + [2 * img_scale_h, 2 * img_scale_w]).numpy() + mosaic_bboxes = mosaic_bboxes[inside_inds] + mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds] + mosaic_ignore_flags = mosaic_ignore_flags[inside_inds] + + results['img'] = mosaic_img + results['img_shape'] = mosaic_img.shape + results['gt_bboxes'] = mosaic_bboxes + results['gt_bboxes_labels'] = mosaic_bboxes_labels + results['gt_ignore_flags'] = mosaic_ignore_flags + return results + + def _mosaic_combine(self, loc: str, + img_shape_hw: Tuple[int, int]) -> Tuple[int, ...]: + """Calculate global coordinate of mosaic image. + + Args: + loc (str): Index for the sub-image. + img_shape_hw (Sequence[int]): Height and width of sub-image + + Returns: + paste_coord (tuple): paste corner coordinate in mosaic image. 
+ """ + assert loc in ('center', 'top', 'top_right', 'right', 'bottom_right', + 'bottom', 'bottom_left', 'left', 'top_left') + + img_scale_w, img_scale_h = self.img_scale + + self._current_img_shape = img_shape_hw + current_img_h, current_img_w = self._current_img_shape + previous_img_h, previous_img_w = self._previous_img_shape + center_img_h, center_img_w = self._center_img_shape + + if loc == 'center': + self._center_img_shape = self._current_img_shape + # xmin, ymin, xmax, ymax + paste_coord = img_scale_w, \ + img_scale_h, \ + img_scale_w + current_img_w, \ + img_scale_h + current_img_h + elif loc == 'top': + paste_coord = img_scale_w, \ + img_scale_h - current_img_h, \ + img_scale_w + current_img_w, \ + img_scale_h + elif loc == 'top_right': + paste_coord = img_scale_w + previous_img_w, \ + img_scale_h - current_img_h, \ + img_scale_w + previous_img_w + current_img_w, \ + img_scale_h + elif loc == 'right': + paste_coord = img_scale_w + center_img_w, \ + img_scale_h, \ + img_scale_w + center_img_w + current_img_w, \ + img_scale_h + current_img_h + elif loc == 'bottom_right': + paste_coord = img_scale_w + center_img_w, \ + img_scale_h + previous_img_h, \ + img_scale_w + center_img_w + current_img_w, \ + img_scale_h + previous_img_h + current_img_h + elif loc == 'bottom': + paste_coord = img_scale_w + center_img_w - current_img_w, \ + img_scale_h + center_img_h, \ + img_scale_w + center_img_w, \ + img_scale_h + center_img_h + current_img_h + elif loc == 'bottom_left': + paste_coord = img_scale_w + center_img_w - \ + previous_img_w - current_img_w, \ + img_scale_h + center_img_h, \ + img_scale_w + center_img_w - previous_img_w, \ + img_scale_h + center_img_h + current_img_h + elif loc == 'left': + paste_coord = img_scale_w - current_img_w, \ + img_scale_h + center_img_h - current_img_h, \ + img_scale_w, \ + img_scale_h + center_img_h + elif loc == 'top_left': + paste_coord = img_scale_w - current_img_w, \ + img_scale_h + center_img_h - \ + previous_img_h - current_img_h, \ + img_scale_w, \ + img_scale_h + center_img_h - previous_img_h + + self._previous_img_shape = self._current_img_shape + # xmin, ymin, xmax, ymax + return paste_coord + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class YOLOv5MultiModalMixUp(BaseMultiModalMixImageTransform): + """MixUp data augmentation for YOLOv5. + + .. code:: text + + The mixup transform steps are as follows: + + 1. Another random image is picked by dataset. + 2. Randomly obtain the fusion ratio from the beta distribution, + then fuse the target + of the original image and mixup image through this ratio. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + + Args: + alpha (float): parameter of beta distribution to get mixup ratio. + Defaults to 32. + beta (float): parameter of beta distribution to get mixup ratio. + Defaults to 32. + pre_transform (Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. 
+ max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 20. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of iterations. If the number of + iterations is greater than `max_refetch`, but gt_bbox is still + empty, then the iteration is terminated. Defaults to 15. + """ + + def __init__(self, + alpha: float = 32.0, + beta: float = 32.0, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 20, + random_pop: bool = True, + max_refetch: int = 15): + if use_cached: + assert max_cached_images >= 2, 'The length of cache must >= 2, ' \ + f'but got {max_cached_images}.' + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + self.alpha = alpha + self.beta = beta + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> int: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + int: indexes. + """ + return random.randint(0, len(dataset)) + + def mix_img_transform(self, results: dict) -> dict: + """YOLOv5 MixUp transform function. + + Args: + results (dict): Result dict + + Returns: + results (dict): Updated result dict. + """ + assert 'mix_results' in results + + retrieve_results = results['mix_results'][0] + retrieve_img = retrieve_results['img'] + ori_img = results['img'] + assert ori_img.shape == retrieve_img.shape + + # Randomly obtain the fusion ratio from the beta distribution, + # which is around 0.5 + ratio = np.random.beta(self.alpha, self.beta) + mixup_img = (ori_img * ratio + retrieve_img * (1 - ratio)) + + retrieve_gt_bboxes = retrieve_results['gt_bboxes'] + retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] + retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] + + mixup_gt_bboxes = retrieve_gt_bboxes.cat( + (results['gt_bboxes'], retrieve_gt_bboxes), dim=0) + mixup_gt_bboxes_labels = np.concatenate( + (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) + mixup_gt_ignore_flags = np.concatenate( + (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) + if 'gt_masks' in results: + assert 'gt_masks' in retrieve_results + mixup_gt_masks = results['gt_masks'].cat( + [results['gt_masks'], retrieve_results['gt_masks']]) + results['gt_masks'] = mixup_gt_masks + + results['img'] = mixup_img.astype(np.uint8) + results['img_shape'] = mixup_img.shape + results['gt_bboxes'] = mixup_gt_bboxes + results['gt_bboxes_labels'] = mixup_gt_bboxes_labels + results['gt_ignore_flags'] = mixup_gt_ignore_flags + + return results + + +@TRANSFORMS.register_module() +class YOLOXMultiModalMixUp(BaseMultiModalMixImageTransform): + """MixUp data augmentation for YOLOX. + + .. code:: text + + mixup transform + +---------------+--------------+ + | mixup image | | + | +--------|--------+ | + | | | | | + +---------------+ | | + | | | | + | | image | | + | | | | + | | | | + | +-----------------+ | + | pad | + +------------------------------+ + + The mixup transform steps are as follows: + + 1. 
Another random image is picked by dataset and embedded in + the top left patch(after padding and resizing) + 2. The target of mixup transform is the weighted average of mixup + image and origin image. + + Required Keys: + + - img + - gt_bboxes (BaseBoxes[torch.float32]) (optional) + - gt_bboxes_labels (np.int64) (optional) + - gt_ignore_flags (bool) (optional) + - mix_results (List[dict]) + + + Modified Keys: + + - img + - img_shape + - gt_bboxes (optional) + - gt_bboxes_labels (optional) + - gt_ignore_flags (optional) + + + Args: + img_scale (Sequence[int]): Image output size after mixup pipeline. + The shape order should be (width, height). Defaults to (640, 640). + ratio_range (Sequence[float]): Scale ratio of mixup image. + Defaults to (0.5, 1.5). + flip_ratio (float): Horizontal flip ratio of mixup image. + Defaults to 0.5. + pad_val (int): Pad value. Defaults to 114. + bbox_clip_border (bool, optional): Whether to clip the objects outside + the border of the image. In some dataset like MOT17, the gt bboxes + are allowed to cross the border of images. Therefore, we don't + need to clip the gt bboxes in these cases. Defaults to True. + pre_transform(Sequence[dict]): Sequence of transform object or + config dict to be composed. + prob (float): Probability of applying this transformation. + Defaults to 1.0. + use_cached (bool): Whether to use cache. Defaults to False. + max_cached_images (int): The maximum length of the cache. The larger + the cache, the stronger the randomness of this transform. As a + rule of thumb, providing 10 caches for each image suffices for + randomness. Defaults to 20. + random_pop (bool): Whether to randomly pop a result from the cache + when the cache is full. If set to False, use FIFO popping method. + Defaults to True. + max_refetch (int): The maximum number of iterations. If the number of + iterations is greater than `max_refetch`, but gt_bbox is still + empty, then the iteration is terminated. Defaults to 15. + """ + + def __init__(self, + img_scale: Tuple[int, int] = (640, 640), + ratio_range: Tuple[float, float] = (0.5, 1.5), + flip_ratio: float = 0.5, + pad_val: float = 114.0, + bbox_clip_border: bool = True, + pre_transform: Sequence[dict] = None, + prob: float = 1.0, + use_cached: bool = False, + max_cached_images: int = 20, + random_pop: bool = True, + max_refetch: int = 15): + assert isinstance(img_scale, tuple) + if use_cached: + assert max_cached_images >= 2, 'The length of cache must >= 2, ' \ + f'but got {max_cached_images}.' + super().__init__( + pre_transform=pre_transform, + prob=prob, + use_cached=use_cached, + max_cached_images=max_cached_images, + random_pop=random_pop, + max_refetch=max_refetch) + self.img_scale = img_scale + self.ratio_range = ratio_range + self.flip_ratio = flip_ratio + self.pad_val = pad_val + self.bbox_clip_border = bbox_clip_border + + def get_indexes(self, dataset: Union[BaseDataset, list]) -> int: + """Call function to collect indexes. + + Args: + dataset (:obj:`Dataset` or list): The dataset or cached list. + + Returns: + int: indexes. + """ + return random.randint(0, len(dataset)) + + def mix_img_transform(self, results: dict) -> dict: + """YOLOX MixUp transform function. + + Args: + results (dict): Result dict. + + Returns: + results (dict): Updated result dict. + """ + assert 'mix_results' in results + assert len( + results['mix_results']) == 1, 'MixUp only support 2 images now !' 
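+        # A single paired sample is supported here: `mix_results` holds exactly
+        # one pre-processed image whose boxes, labels and ignore flags are
+        # merged into the current sample further below.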
+ + if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: + # empty bbox + return results + + retrieve_results = results['mix_results'][0] + retrieve_img = retrieve_results['img'] + + jit_factor = random.uniform(*self.ratio_range) + is_filp = random.uniform(0, 1) > self.flip_ratio + + if len(retrieve_img.shape) == 3: + out_img = np.ones((self.img_scale[1], self.img_scale[0], 3), + dtype=retrieve_img.dtype) * self.pad_val + else: + out_img = np.ones( + self.img_scale[::-1], dtype=retrieve_img.dtype) * self.pad_val + + # 1. keep_ratio resize + scale_ratio = min(self.img_scale[1] / retrieve_img.shape[0], + self.img_scale[0] / retrieve_img.shape[1]) + retrieve_img = mmcv.imresize( + retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), + int(retrieve_img.shape[0] * scale_ratio))) + + # 2. paste + out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img + + # 3. scale jit + scale_ratio *= jit_factor + out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), + int(out_img.shape[0] * jit_factor))) + + # 4. flip + if is_filp: + out_img = out_img[:, ::-1, :] + + # 5. random crop + ori_img = results['img'] + origin_h, origin_w = out_img.shape[:2] + target_h, target_w = ori_img.shape[:2] + padded_img = np.ones((max(origin_h, target_h), max( + origin_w, target_w), 3)) * self.pad_val + padded_img = padded_img.astype(np.uint8) + padded_img[:origin_h, :origin_w] = out_img + + x_offset, y_offset = 0, 0 + if padded_img.shape[0] > target_h: + y_offset = random.randint(0, padded_img.shape[0] - target_h) + if padded_img.shape[1] > target_w: + x_offset = random.randint(0, padded_img.shape[1] - target_w) + padded_cropped_img = padded_img[y_offset:y_offset + target_h, + x_offset:x_offset + target_w] + + # 6. adjust bbox + retrieve_gt_bboxes = retrieve_results['gt_bboxes'] + retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio]) + if self.bbox_clip_border: + retrieve_gt_bboxes.clip_([origin_h, origin_w]) + + if is_filp: + retrieve_gt_bboxes.flip_([origin_h, origin_w], + direction='horizontal') + + # 7. filter + cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone() + cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset]) + if self.bbox_clip_border: + cp_retrieve_gt_bboxes.clip_([target_h, target_w]) + + # 8. 
mix up + mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img + + retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] + retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] + + mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat( + (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0) + mixup_gt_bboxes_labels = np.concatenate( + (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) + mixup_gt_ignore_flags = np.concatenate( + (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) + + if not self.bbox_clip_border: + # remove outside bbox + inside_inds = mixup_gt_bboxes.is_inside([target_h, + target_w]).numpy() + mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] + mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds] + mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds] + + results['img'] = mixup_img.astype(np.uint8) + results['img_shape'] = mixup_img.shape + results['gt_bboxes'] = mixup_gt_bboxes + results['gt_bboxes_labels'] = mixup_gt_bboxes_labels + results['gt_ignore_flags'] = mixup_gt_ignore_flags + + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(img_scale={self.img_scale}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'flip_ratio={self.flip_ratio}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'max_refetch={self.max_refetch}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str diff --git a/models/YOLO-World/yolo_world/datasets/transformers/mm_transforms.py b/models/YOLO-World/yolo_world/datasets/transformers/mm_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..0008920b82fc29b3ccc0473e894cd718cdb21fa4 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/transformers/mm_transforms.py @@ -0,0 +1,129 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
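+# The two transforms below build the per-image text prompts consumed by the
+# YOLO-World text encoder. `RandomLoadText` keeps the classes present in the
+# image, samples extra negative class names, remaps `gt_bboxes_labels` to
+# indices into the sampled list and writes the prompt strings to
+# `results['texts']`; `LoadText` simply takes the first caption of every
+# class. A typical pipeline entry might look like the following sketch
+# (the values are illustrative, not taken from a released config):
+#
+#     dict(type='RandomLoadText',
+#          num_neg_samples=(80, 80),
+#          max_num_samples=80,
+#          padding_to_max=True,
+#          padding_value=' ')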
+import json +import random +from typing import Tuple + +import numpy as np +from mmyolo.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class RandomLoadText: + + def __init__(self, + text_path: str = None, + prompt_format: str = '{}', + num_neg_samples: Tuple[int, int] = (80, 80), + max_num_samples: int = 80, + padding_to_max: bool = False, + padding_value: str = '') -> None: + self.prompt_format = prompt_format + self.num_neg_samples = num_neg_samples + self.max_num_samples = max_num_samples + self.padding_to_max = padding_to_max + self.padding_value = padding_value + if text_path is not None: + with open(text_path, 'r') as f: + self.class_texts = json.load(f) + + def __call__(self, results: dict) -> dict: + assert 'texts' in results or hasattr(self, 'class_texts'), ( + 'No texts found in results.') + class_texts = results.get( + 'texts', + getattr(self, 'class_texts', None)) + + num_classes = len(class_texts) + if 'gt_labels' in results: + gt_label_tag = 'gt_labels' + elif 'gt_bboxes_labels' in results: + gt_label_tag = 'gt_bboxes_labels' + else: + raise ValueError('No valid labels found in results.') + positive_labels = set(results[gt_label_tag]) + + if len(positive_labels) > self.max_num_samples: + positive_labels = set(random.sample(list(positive_labels), + k=self.max_num_samples)) + + num_neg_samples = min( + min(num_classes, self.max_num_samples) - len(positive_labels), + random.randint(*self.num_neg_samples)) + candidate_neg_labels = [] + for idx in range(num_classes): + if idx not in positive_labels: + candidate_neg_labels.append(idx) + negative_labels = random.sample( + candidate_neg_labels, k=num_neg_samples) + + sampled_labels = list(positive_labels) + list(negative_labels) + random.shuffle(sampled_labels) + + label2ids = {label: i for i, label in enumerate(sampled_labels)} + + gt_valid_mask = np.zeros(len(results['gt_bboxes']), dtype=bool) + for idx, label in enumerate(results[gt_label_tag]): + if label in label2ids: + gt_valid_mask[idx] = True + results[gt_label_tag][idx] = label2ids[label] + results['gt_bboxes'] = results['gt_bboxes'][gt_valid_mask] + results[gt_label_tag] = results[gt_label_tag][gt_valid_mask] + + if 'instances' in results: + retaged_instances = [] + for idx, inst in enumerate(results['instances']): + label = inst['bbox_label'] + if label in label2ids: + inst['bbox_label'] = label2ids[label] + retaged_instances.append(inst) + results['instances'] = retaged_instances + + texts = [] + for label in sampled_labels: + cls_caps = class_texts[label] + assert len(cls_caps) > 0 + cap_id = random.randrange(len(cls_caps)) + sel_cls_cap = self.prompt_format.format(cls_caps[cap_id]) + texts.append(sel_cls_cap) + + if self.padding_to_max: + num_valid_labels = len(positive_labels) + len(negative_labels) + num_padding = self.max_num_samples - num_valid_labels + if num_padding > 0: + texts += [self.padding_value] * num_padding + + results['texts'] = texts + + return results + + +@TRANSFORMS.register_module() +class LoadText: + + def __init__(self, + text_path: str = None, + prompt_format: str = '{}', + multi_prompt_flag: str = '/') -> None: + self.prompt_format = prompt_format + self.multi_prompt_flag = multi_prompt_flag + if text_path is not None: + with open(text_path, 'r') as f: + self.class_texts = json.load(f) + + def __call__(self, results: dict) -> dict: + assert 'texts' in results or hasattr(self, 'class_texts'), ( + 'No texts found in results.') + class_texts = results.get( + 'texts', + getattr(self, 'class_texts', None)) + + texts = [] + for idx, 
cls_caps in enumerate(class_texts): + assert len(cls_caps) > 0 + sel_cls_cap = cls_caps[0] + sel_cls_cap = self.prompt_format.format(sel_cls_cap) + texts.append(sel_cls_cap) + + results['texts'] = texts + + return results diff --git a/models/YOLO-World/yolo_world/datasets/utils.py b/models/YOLO-World/yolo_world/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4731a45410c0d964ae0fa1b1a863304850a1eee7 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/utils.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +from mmengine.dataset import COLLATE_FUNCTIONS + + +@COLLATE_FUNCTIONS.register_module() +def yolow_collate(data_batch: Sequence, + use_ms_training: bool = False) -> dict: + """Rewrite collate_fn to get faster training speed. + + Args: + data_batch (Sequence): Batch of data. + use_ms_training (bool): Whether to use multi-scale training. + """ + batch_imgs = [] + batch_bboxes_labels = [] + batch_masks = [] + for i in range(len(data_batch)): + datasamples = data_batch[i]['data_samples'] + inputs = data_batch[i]['inputs'] + batch_imgs.append(inputs) + + gt_bboxes = datasamples.gt_instances.bboxes.tensor + gt_labels = datasamples.gt_instances.labels + if 'masks' in datasamples.gt_instances: + masks = datasamples.gt_instances.masks.to( + dtype=torch.bool, device=gt_bboxes.device) + batch_masks.append(masks) + batch_idx = gt_labels.new_full((len(gt_labels), 1), i) + bboxes_labels = torch.cat((batch_idx, gt_labels[:, None], gt_bboxes), + dim=1) + batch_bboxes_labels.append(bboxes_labels) + + collated_results = { + 'data_samples': { + 'bboxes_labels': torch.cat(batch_bboxes_labels, 0) + } + } + if len(batch_masks) > 0: + collated_results['data_samples']['masks'] = torch.cat(batch_masks, 0) + + if use_ms_training: + collated_results['inputs'] = batch_imgs + else: + collated_results['inputs'] = torch.stack(batch_imgs, 0) + + if hasattr(data_batch[0]['data_samples'], 'texts'): + batch_texts = [meta['data_samples'].texts for meta in data_batch] + collated_results['data_samples']['texts'] = batch_texts + + if hasattr(data_batch[0]['data_samples'], 'is_detection'): + # detection flag + batch_detection = [meta['data_samples'].is_detection + for meta in data_batch] + collated_results['data_samples']['is_detection'] = torch.tensor( + batch_detection) + + return collated_results diff --git a/models/YOLO-World/yolo_world/datasets/yolov5_lvis.py b/models/YOLO-World/yolo_world/datasets/yolov5_lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..32585044ed82e839e77b414229e2e53481e9eaf5 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/yolov5_lvis.py @@ -0,0 +1,15 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from mmdet.datasets import LVISV1Dataset + +from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset +from mmyolo.registry import DATASETS + + +@DATASETS.register_module() +class YOLOv5LVISV1Dataset(BatchShapePolicyDataset, LVISV1Dataset): + """Dataset for YOLOv5 LVIS Dataset. + + We only add `BatchShapePolicy` function compared with Objects365V1Dataset. 
+ See `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + pass diff --git a/models/YOLO-World/yolo_world/datasets/yolov5_mixed_grounding.py b/models/YOLO-World/yolo_world/datasets/yolov5_mixed_grounding.py new file mode 100644 index 0000000000000000000000000000000000000000..98dd03104e3160b5226ace6eb6a432534125fcf4 --- /dev/null +++ b/models/YOLO-World/yolo_world/datasets/yolov5_mixed_grounding.py @@ -0,0 +1,200 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import os.path as osp +from typing import List, Union + +from mmengine.fileio import get_local_path, join_path +from mmengine.utils import is_abs +from mmdet.datasets.coco import CocoDataset +from mmyolo.registry import DATASETS +from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset + + +@DATASETS.register_module() +class YOLOv5MixedGroundingDataset(BatchShapePolicyDataset, CocoDataset): + """Mixed grounding dataset.""" + + METAINFO = { + 'classes': ('object',), + 'palette': [(220, 20, 60)]} + + def load_data_list(self) -> List[dict]: + """Load annotations from an annotation file named as ``self.ann_file`` + + Returns: + List[dict]: A list of annotation. + """ # noqa: E501 + with get_local_path( + self.ann_file, backend_args=self.backend_args) as local_path: + self.coco = self.COCOAPI(local_path) + + img_ids = self.coco.get_img_ids() + data_list = [] + total_ann_ids = [] + for img_id in img_ids: + raw_img_info = self.coco.load_imgs([img_id])[0] + raw_img_info['img_id'] = img_id + + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + raw_ann_info = self.coco.load_anns(ann_ids) + total_ann_ids.extend(ann_ids) + + parsed_data_info = self.parse_data_info({ + 'raw_ann_info': + raw_ann_info, + 'raw_img_info': + raw_img_info + }) + data_list.append(parsed_data_info) + if self.ANN_ID_UNIQUE: + assert len(set(total_ann_ids)) == len( + total_ann_ids + ), f"Annotation ids in '{self.ann_file}' are not unique!" + + del self.coco + # print(len(data_list)) + return data_list + + def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]: + """Parse raw annotation to target format. + + Args: + raw_data_info (dict): Raw data information load from ``ann_file`` + + Returns: + Union[dict, List[dict]]: Parsed annotation. 
+ """ + img_info = raw_data_info['raw_img_info'] + ann_info = raw_data_info['raw_ann_info'] + + data_info = {} + + img_path = None + img_prefix = self.data_prefix.get('img', None) + if isinstance(img_prefix, str): + img_path = osp.join(img_prefix, img_info['file_name']) + elif isinstance(img_prefix, (list, tuple)): + for prefix in img_prefix: + candidate_img_path = osp.join(prefix, img_info['file_name']) + if osp.exists(candidate_img_path): + img_path = candidate_img_path + break + assert img_path is not None, ( + f'Image path {img_info["file_name"]} not found in' + f'{img_prefix}') + if self.data_prefix.get('seg', None): + seg_map_path = osp.join( + self.data_prefix['seg'], + img_info['file_name'].rsplit('.', 1)[0] + self.seg_map_suffix) + else: + seg_map_path = None + data_info['img_path'] = img_path + data_info['img_id'] = img_info['img_id'] + data_info['seg_map_path'] = seg_map_path + data_info['height'] = float(img_info['height']) + data_info['width'] = float(img_info['width']) + + cat2id = {} + texts = [] + for ann in ann_info: + cat_name = ' '.join([img_info['caption'][t[0]:t[1]] + for t in ann['tokens_positive']]) + if cat_name not in cat2id: + cat2id[cat_name] = len(cat2id) + texts.append([cat_name]) + data_info['texts'] = texts + + instances = [] + for i, ann in enumerate(ann_info): + instance = {} + + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, + min(x1 + w, float(img_info['width'])) - max(x1, 0)) + inter_h = max(0, + min(y1 + h, float(img_info['height'])) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + if ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w, y1 + h] + + if ann.get('iscrowd', False): + instance['ignore_flag'] = 1 + else: + instance['ignore_flag'] = 0 + instance['bbox'] = bbox + + cat_name = ' '.join([img_info['caption'][t[0]:t[1]] + for t in ann['tokens_positive']]) + instance['bbox_label'] = cat2id[cat_name] + + if ann.get('segmentation', None): + instance['mask'] = ann['segmentation'] + + instances.append(instance) + # NOTE: for detection task, we set `is_detection` to 1 + data_info['is_detection'] = 1 + data_info['instances'] = instances + # print(data_info['texts']) + return data_info + + def filter_data(self) -> List[dict]: + """Filter annotations according to filter_cfg. + + Returns: + List[dict]: Filtered results. + """ + if self.test_mode: + return self.data_list + + if self.filter_cfg is None: + return self.data_list + + filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False) + min_size = self.filter_cfg.get('min_size', 0) + + # obtain images that contain annotation + ids_with_ann = set(data_info['img_id'] for data_info in self.data_list) + + valid_data_infos = [] + for i, data_info in enumerate(self.data_list): + img_id = data_info['img_id'] + width = int(data_info['width']) + height = int(data_info['height']) + if filter_empty_gt and img_id not in ids_with_ann: + continue + if min(width, height) >= min_size: + valid_data_infos.append(data_info) + + return valid_data_infos + + def _join_prefix(self): + """Join ``self.data_root`` with ``self.data_prefix`` and + ``self.ann_file``. + """ + # Automatically join annotation file path with `self.root` if + # `self.ann_file` is not an absolute path. + if self.ann_file and not is_abs(self.ann_file) and self.data_root: + self.ann_file = join_path(self.data_root, self.ann_file) + # Automatically join data directory with `self.root` if path value in + # `self.data_prefix` is not an absolute path. 
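+        # For example, data_root='data/mixed_grounding/' together with
+        # data_prefix=dict(img='images/') resolves to
+        # data_prefix=dict(img='data/mixed_grounding/images/'); absolute paths
+        # are left untouched (the paths here are illustrative).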
+        for data_key, prefix in self.data_prefix.items():
+            if isinstance(prefix, (list, tuple)):
+                abs_prefix = []
+                for p in prefix:
+                    if not is_abs(p) and self.data_root:
+                        abs_prefix.append(join_path(self.data_root, p))
+                    else:
+                        abs_prefix.append(p)
+                self.data_prefix[data_key] = abs_prefix
+            elif isinstance(prefix, str):
+                if not is_abs(prefix) and self.data_root:
+                    self.data_prefix[data_key] = join_path(
+                        self.data_root, prefix)
+                else:
+                    self.data_prefix[data_key] = prefix
+            else:
+                raise TypeError('prefix should be a string, tuple or list, '
+                                f'but got {type(prefix)}')
diff --git a/models/YOLO-World/yolo_world/datasets/yolov5_obj365v1.py b/models/YOLO-World/yolo_world/datasets/yolov5_obj365v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..593dc86b6606eeec7e3fcc3fa00178fd29aad07e
--- /dev/null
+++ b/models/YOLO-World/yolo_world/datasets/yolov5_obj365v1.py
@@ -0,0 +1,15 @@
+# Copyright (c) Tencent Inc. All rights reserved.
+from mmdet.datasets import Objects365V1Dataset
+
+from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset
+from mmyolo.registry import DATASETS
+
+
+@DATASETS.register_module()
+class YOLOv5Objects365V1Dataset(BatchShapePolicyDataset, Objects365V1Dataset):
+    """Dataset for YOLOv5 Objects365 v1 Dataset.
+
+    We only add the `BatchShapePolicy` function compared with Objects365V1Dataset.
+    See `mmyolo/datasets/utils.py#BatchShapePolicy` for details.
+    """
+    pass
diff --git a/models/YOLO-World/yolo_world/datasets/yolov5_obj365v2.py b/models/YOLO-World/yolo_world/datasets/yolov5_obj365v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..7008565c6bac3813810c025498def528c9a590d6
--- /dev/null
+++ b/models/YOLO-World/yolo_world/datasets/yolov5_obj365v2.py
@@ -0,0 +1,15 @@
+# Copyright (c) Tencent Inc. All rights reserved.
+from mmdet.datasets import Objects365V2Dataset
+
+from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset
+from mmyolo.registry import DATASETS
+
+
+@DATASETS.register_module()
+class YOLOv5Objects365V2Dataset(BatchShapePolicyDataset, Objects365V2Dataset):
+    """Dataset for YOLOv5 Objects365 v2 Dataset.
+
+    We only add the `BatchShapePolicy` function compared with Objects365V2Dataset.
+    See `mmyolo/datasets/utils.py#BatchShapePolicy` for details.
+    """
+    pass
diff --git a/models/YOLO-World/yolo_world/datasets/yolov5_v3det.py b/models/YOLO-World/yolo_world/datasets/yolov5_v3det.py
new file mode 100644
index 0000000000000000000000000000000000000000..554a0a33e3a206a5c5ec8314f09d038a50c502ad
--- /dev/null
+++ b/models/YOLO-World/yolo_world/datasets/yolov5_v3det.py
@@ -0,0 +1,110 @@
+# Copyright (c) Tencent Inc. All rights reserved.
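+# V3DetDataset follows the CocoDataset loading logic from mmdet but sorts the
+# category entries before building `cat_ids` and skips the images listed in
+# `v3det_ignore_list`. A minimal dataset config could look like the sketch
+# below (the paths are illustrative):
+#
+#     dict(type='YOLOv5V3DetDataset',
+#          data_root='data/v3det/',
+#          ann_file='annotations/v3det_2023_v1_train.json',
+#          data_prefix=dict(img='images/'))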
+import copy +import json +import os.path as osp +from typing import List + +from mmengine.fileio import get_local_path + +from mmdet.datasets.api_wrappers import COCO +from mmdet.datasets import CocoDataset + +from mmyolo.datasets.yolov5_coco import BatchShapePolicyDataset +from mmyolo.registry import DATASETS + +v3det_ignore_list = [ + 'a00013820/26_275_28143226914_ff3a247c53_c.jpg', + 'n03815615/12_1489_32968099046_be38fa580e_c.jpg', + 'n04550184/19_1480_2504784164_ffa3db8844_c.jpg', + 'a00008703/2_363_3576131784_dfac6fc6ce_c.jpg', + 'n02814533/28_2216_30224383848_a90697f1b3_c.jpg', + 'n12026476/29_186_15091304754_5c219872f7_c.jpg', + 'n01956764/12_2004_50133201066_72e0d9fea5_c.jpg', + 'n03785016/14_2642_518053131_d07abcb5da_c.jpg', + 'a00011156/33_250_4548479728_9ce5246596_c.jpg', + 'a00009461/19_152_2792869324_db95bebc84_c.jpg', +] + +# # ugly code here +# with open(osp.join("data/v3det/cats.json"), 'r') as f: +# _classes = json.load(f)['classes'] + + +@DATASETS.register_module() +class V3DetDataset(CocoDataset): + """Objects365 v1 dataset for detection.""" + + METAINFO = {'classes': 'classes', 'palette': None} + + COCOAPI = COCO + # ann_id is unique in coco dataset. + ANN_ID_UNIQUE = True + + def load_data_list(self) -> List[dict]: + """Load annotations from an annotation file named as ``self.ann_file`` + + Returns: + List[dict]: A list of annotation. + """ # noqa: E501 + with get_local_path(self.ann_file, + backend_args=self.backend_args) as local_path: + self.coco = self.COCOAPI(local_path) + + # 'categories' list in objects365_train.json and objects365_val.json + # is inconsistent, need sort list(or dict) before get cat_ids. + cats = self.coco.cats + sorted_cats = {i: cats[i] for i in sorted(cats)} + self.coco.cats = sorted_cats + categories = self.coco.dataset['categories'] + sorted_categories = sorted(categories, key=lambda i: i['id']) + self.coco.dataset['categories'] = sorted_categories + # The order of returned `cat_ids` will not + # change with the order of the `classes` + self.cat_ids = self.coco.get_cat_ids( + cat_names=self.metainfo['classes']) + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.cat_img_map = copy.deepcopy(self.coco.cat_img_map) + + img_ids = self.coco.get_img_ids() + data_list = [] + total_ann_ids = [] + for img_id in img_ids: + raw_img_info = self.coco.load_imgs([img_id])[0] + raw_img_info['img_id'] = img_id + + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + raw_ann_info = self.coco.load_anns(ann_ids) + total_ann_ids.extend(ann_ids) + + file_name = osp.join( + osp.split(osp.split(raw_img_info['file_name'])[0])[-1], + osp.split(raw_img_info['file_name'])[-1]) + + if file_name in v3det_ignore_list: + continue + + parsed_data_info = self.parse_data_info({ + 'raw_ann_info': + raw_ann_info, + 'raw_img_info': + raw_img_info + }) + data_list.append(parsed_data_info) + if self.ANN_ID_UNIQUE: + assert len(set(total_ann_ids)) == len( + total_ann_ids + ), f"Annotation ids in '{self.ann_file}' are not unique!" + + del self.coco + + return data_list + + +@DATASETS.register_module() +class YOLOv5V3DetDataset(BatchShapePolicyDataset, V3DetDataset): + """Dataset for YOLOv5 VOC Dataset. + + We only add `BatchShapePolicy` function compared with Objects365V1Dataset. 
+ See `mmyolo/datasets/utils.py#BatchShapePolicy` for details + """ + pass diff --git a/models/YOLO-World/yolo_world/engine/__init__.py b/models/YOLO-World/yolo_world/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74177cd3c2f867cfa85c41ad6e41a75be478af80 --- /dev/null +++ b/models/YOLO-World/yolo_world/engine/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .optimizers import * # noqa diff --git a/models/YOLO-World/yolo_world/engine/optimizers/__init__.py b/models/YOLO-World/yolo_world/engine/optimizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..607cefb27435590334926f1521734b1ecadc32ab --- /dev/null +++ b/models/YOLO-World/yolo_world/engine/optimizers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .yolow_v5_optim_constructor import YOLOWv5OptimizerConstructor + +__all__ = ['YOLOWv5OptimizerConstructor'] diff --git a/models/YOLO-World/yolo_world/engine/optimizers/yolow_v5_optim_constructor.py b/models/YOLO-World/yolo_world/engine/optimizers/yolow_v5_optim_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b625ebc9684c4cac2a27383f592a786a5a9e00 --- /dev/null +++ b/models/YOLO-World/yolo_world/engine/optimizers/yolow_v5_optim_constructor.py @@ -0,0 +1,187 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import logging +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from torch.nn import GroupNorm, LayerNorm +from mmengine.dist import get_world_size +from mmengine.logging import print_log +from mmengine.optim import OptimWrapper, DefaultOptimWrapperConstructor +from mmengine.utils.dl_utils import mmcv_full_available +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm, _InstanceNorm + +from mmyolo.registry import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, + OPTIMIZERS) + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class YOLOWv5OptimizerConstructor(DefaultOptimWrapperConstructor): + """YOLO World v5 constructor for optimizers.""" + + def __init__(self, + optim_wrapper_cfg: dict, + paramwise_cfg: Optional[dict] = None) -> None: + super().__init__(optim_wrapper_cfg, paramwise_cfg) + self.base_total_batch_size = self.paramwise_cfg.pop( + 'base_total_batch_size', 64) + + def add_params(self, + params: List[dict], + module: nn.Module, + prefix: str = '', + is_dcn_module: Optional[Union[int, float]] = None) -> None: + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. 
+ """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', None) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', None) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', None) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', None) + flat_decay_mult = self.paramwise_cfg.get('flat_decay_mult', None) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', None) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if bypass_duplicate and self._is_in(param_group, params): + print_log( + f'{prefix} is duplicate. It is skipped since ' + f'bypass_duplicate={bypass_duplicate}', + logger='current', + level=logging.WARNING) + continue + if not param.requires_grad: + params.append(param_group) + continue + + # if the parameter match one of the custom keys, ignore other rules + for key in sorted_keys: + if key in f'{prefix}.{name}': + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) + param_group['weight_decay'] = self.base_wd * decay_mult + # add custom settings to param_group + for k, v in custom_keys[key].items(): + param_group[k] = v + break + + # NOTE: the behavious is different from MMDetection + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not ( + is_norm or is_dcn_module) and bias_lr_mult is not None: + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and dcn_offset_lr_mult is not None + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm and norm_decay_mult is not None: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # bias lr and decay + elif (name == 'bias' and not is_dcn_module + and bias_decay_mult is not None): + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + # depth-wise conv + elif is_dwconv and dwconv_decay_mult is not None: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # flatten parameters except dcn offset + elif (param.ndim == 1 and not is_dcn_module + and flat_decay_mult is not None): + param_group[ + 'weight_decay'] = self.base_wd * flat_decay_mult + params.append(param_group) + for key, value in param_group.items(): + if key == 'params': + continue + full_name = f'{prefix}.{name}' if prefix else name + print_log( + f'paramwise_options -- {full_name}:{key}={value}', + logger='current') + + if mmcv_full_available(): + from mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + 
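+            # Recurse into every child module so that the same paramwise rules
+            # (custom_keys, bias/norm/dwconv/flat decay multipliers, the DCN
+            # offset lr multiplier) are applied throughout; the `is_dcn_module`
+            # flag computed above is forwarded to the children.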
child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) + + def __call__(self, model: nn.Module) -> OptimWrapper: + if hasattr(model, 'module'): + model = model.module + + optim_wrapper_cfg = self.optim_wrapper_cfg.copy() + optim_wrapper_cfg.setdefault('type', 'OptimWrapper') + optimizer_cfg = self.optimizer_cfg.copy() + + # follow the original yolov5 implementation + if 'batch_size_per_gpu' in optimizer_cfg: + batch_size_per_gpu = optimizer_cfg.pop('batch_size_per_gpu') + # No scaling if total_batch_size is less than + # base_total_batch_size, otherwise linear scaling. + total_batch_size = get_world_size() * batch_size_per_gpu + accumulate = max( + round(self.base_total_batch_size / total_batch_size), 1) + scale_factor = total_batch_size * \ + accumulate / self.base_total_batch_size + + if scale_factor != 1: + weight_decay = optimizer_cfg.get('weight_decay', 0) + weight_decay *= scale_factor + optimizer_cfg['weight_decay'] = weight_decay + print_log(f'Scaled weight_decay to {weight_decay}', 'current') + + # if no paramwise option is specified, just use the global setting + if not self.paramwise_cfg: + optimizer_cfg['params'] = model.parameters() + optimizer = OPTIMIZERS.build(optimizer_cfg) + else: + # set param-wise lr and weight decay recursively + params: List = [] + self.add_params(params, model) + optimizer_cfg['params'] = params + optimizer = OPTIMIZERS.build(optimizer_cfg) + optim_wrapper = OPTIM_WRAPPERS.build( + optim_wrapper_cfg, default_args=dict(optimizer=optimizer)) + return optim_wrapper diff --git a/models/YOLO-World/yolo_world/models/__init__.py b/models/YOLO-World/yolo_world/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..98bbeaef134ba04bede5d409537d05c6616cb8f0 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .backbones import * # noqa +from .layers import * # noqa +from .detectors import * # noqa +from .losses import * # noqa +from .data_preprocessors import * # noqa +from .dense_heads import * # noqa +from .necks import * # noqa +from .assigner import * # noqa diff --git a/models/YOLO-World/yolo_world/models/assigner/__init__.py b/models/YOLO-World/yolo_world/models/assigner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..320009790289c81a43c57a93a12ce1896963a6b6 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/assigner/__init__.py @@ -0,0 +1,3 @@ +from .task_aligned_assigner import YOLOWorldSegAssigner + +__all__ = ['YOLOWorldSegAssigner'] \ No newline at end of file diff --git a/models/YOLO-World/yolo_world/models/assigner/task_aligned_assigner.py b/models/YOLO-World/yolo_world/models/assigner/task_aligned_assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..a6f0d242844eb703da88fd5c74c215e29f017d35 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/assigner/task_aligned_assigner.py @@ -0,0 +1,108 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
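+# YOLOWorldSegAssigner extends mmyolo's BatchTaskAlignedAssigner and, besides
+# the assigned labels/boxes/scores, also returns `assigned_gt_idxs` so that a
+# segmentation head can gather per-prior mask targets. In a config it would
+# typically be referenced roughly as follows (values are illustrative):
+#
+#     assigner=dict(type='YOLOWorldSegAssigner',
+#                   num_classes=80,
+#                   topk=10,
+#                   alpha=0.5,
+#                   beta=6.0,
+#                   use_ciou=True)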
+import torch +from torch import Tensor +from mmyolo.registry import TASK_UTILS +from mmyolo.models.task_modules.assigners import BatchTaskAlignedAssigner +from mmyolo.models.task_modules.assigners.utils import select_highest_overlaps + +@TASK_UTILS.register_module() +class YOLOWorldSegAssigner(BatchTaskAlignedAssigner): + + def __init__(self, + num_classes: int, + topk: int = 13, + alpha: float = 1, + beta: float = 6, + eps: float = 1e-7, + use_ciou: bool = False): + super().__init__(num_classes, topk, alpha, beta, eps, use_ciou) + + @torch.no_grad() + def forward( + self, + pred_bboxes: Tensor, + pred_scores: Tensor, + priors: Tensor, + gt_labels: Tensor, + gt_bboxes: Tensor, + pad_bbox_flag: Tensor, + ) -> dict: + """Assign gt to bboxes. + + The assignment is done in following steps + 1. compute alignment metric between all bbox (bbox of all pyramid + levels) and gt + 2. select top-k bbox as candidates for each gt + 3. limit the positive sample's center in gt (because the anchor-free + detector only can predict positive distance) + Args: + pred_bboxes (Tensor): Predict bboxes, + shape(batch_size, num_priors, 4) + pred_scores (Tensor): Scores of predict bboxes, + shape(batch_size, num_priors, num_classes) + priors (Tensor): Model priors, shape (num_priors, 4) + gt_labels (Tensor): Ground true labels, + shape(batch_size, num_gt, 1) + gt_bboxes (Tensor): Ground true bboxes, + shape(batch_size, num_gt, 4) + pad_bbox_flag (Tensor): Ground truth bbox mask, + 1 means bbox, 0 means no bbox, + shape(batch_size, num_gt, 1) + Returns: + assigned_result (dict) Assigned result: + assigned_labels (Tensor): Assigned labels, + shape(batch_size, num_priors) + assigned_bboxes (Tensor): Assigned boxes, + shape(batch_size, num_priors, 4) + assigned_scores (Tensor): Assigned scores, + shape(batch_size, num_priors, num_classes) + fg_mask_pre_prior (Tensor): Force ground truth matching mask, + shape(batch_size, num_priors) + """ + # (num_priors, 4) -> (num_priors, 2) + priors = priors[:, :2] + + batch_size = pred_scores.size(0) + num_gt = gt_bboxes.size(1) + + assigned_result = { + 'assigned_labels': + gt_bboxes.new_full(pred_scores[..., 0].shape, self.num_classes), + 'assigned_bboxes': + gt_bboxes.new_full(pred_bboxes.shape, 0), + 'assigned_scores': + gt_bboxes.new_full(pred_scores.shape, 0), + 'fg_mask_pre_prior': + gt_bboxes.new_full(pred_scores[..., 0].shape, 0) + } + + if num_gt == 0: + return assigned_result + + pos_mask, alignment_metrics, overlaps = self.get_pos_mask( + pred_bboxes, pred_scores, priors, gt_labels, gt_bboxes, + pad_bbox_flag, batch_size, num_gt) + + (assigned_gt_idxs, fg_mask_pre_prior, + pos_mask) = select_highest_overlaps(pos_mask, overlaps, num_gt) + + # assigned target + assigned_labels, assigned_bboxes, assigned_scores = self.get_targets( + gt_labels, gt_bboxes, assigned_gt_idxs, fg_mask_pre_prior, + batch_size, num_gt) + + # normalize + alignment_metrics *= pos_mask + pos_align_metrics = alignment_metrics.max(axis=-1, keepdim=True)[0] + pos_overlaps = (overlaps * pos_mask).max(axis=-1, keepdim=True)[0] + norm_align_metric = ( + alignment_metrics * pos_overlaps / + (pos_align_metrics + self.eps)).max(-2)[0].unsqueeze(-1) + assigned_scores = assigned_scores * norm_align_metric + + assigned_result['assigned_labels'] = assigned_labels + assigned_result['assigned_bboxes'] = assigned_bboxes + assigned_result['assigned_scores'] = assigned_scores + assigned_result['fg_mask_pre_prior'] = fg_mask_pre_prior.bool() + assigned_result['assigned_gt_idxs'] = assigned_gt_idxs + return 
assigned_result diff --git a/models/YOLO-World/yolo_world/models/backbones/__init__.py b/models/YOLO-World/yolo_world/models/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67698adfe7d6efe0beef29127ce7f34e9aa573ba --- /dev/null +++ b/models/YOLO-World/yolo_world/models/backbones/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Tencent Inc. All rights reserved. +# YOLO Multi-Modal Backbone (Vision Language) +# Vision: YOLOv8 CSPDarknet +# Language: CLIP Text Encoder (12-layer transformer) +from .mm_backbone import ( + MultiModalYOLOBackbone, + HuggingVisionBackbone, + HuggingCLIPLanguageBackbone, + PseudoLanguageBackbone) + +__all__ = [ + 'MultiModalYOLOBackbone', + 'HuggingVisionBackbone', + 'HuggingCLIPLanguageBackbone', + 'PseudoLanguageBackbone' +] diff --git a/models/YOLO-World/yolo_world/models/backbones/mm_backbone.py b/models/YOLO-World/yolo_world/models/backbones/mm_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..37dab1537080f8fa573c5023be22e0926e730950 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/backbones/mm_backbone.py @@ -0,0 +1,227 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import itertools +from typing import List, Sequence, Tuple +import torch +from torch import Tensor +from torch.nn.modules.batchnorm import _BatchNorm +from mmengine.model import BaseModule +from mmyolo.registry import MODELS +from mmdet.utils import OptMultiConfig, ConfigType +from transformers import (AutoTokenizer, AutoModel, CLIPTextConfig) +from transformers import CLIPTextModelWithProjection as CLIPTP + + +@MODELS.register_module() +class HuggingVisionBackbone(BaseModule): + + def __init__(self, + model_name: str, + out_indices: Sequence[int] = (0, 1, 2, 3), + norm_eval: bool = True, + frozen_modules: Sequence[str] = (), + init_cfg: OptMultiConfig = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.norm_eval = norm_eval + self.frozen_modules = frozen_modules + self.model = AutoModel.from_pretrained(model_name) + + self._freeze_modules() + + def forward(self, image: Tensor) -> Tuple[Tensor]: + encoded_dict = self.image_model(pixel_values=image, + output_hidden_states=True) + hidden_states = encoded_dict.hidden_states + img_feats = encoded_dict.get('reshaped_hidden_states', hidden_states) + img_feats = [img_feats[i] for i in self.image_out_indices] + return tuple(img_feats) + + def _freeze_modules(self): + for name, module in self.model.named_modules(): + for frozen_name in self.frozen_modules: + if name.startswith(frozen_name): + module.eval() + for param in module.parameters(): + param.requires_grad = False + break + + def train(self, mode=True): + super().train(mode) + self._freeze_modules() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@MODELS.register_module() +class HuggingCLIPLanguageBackbone(BaseModule): + + def __init__(self, + model_name: str, + frozen_modules: Sequence[str] = (), + dropout: float = 0.0, + training_use_cache: bool = False, + init_cfg: OptMultiConfig = None) -> None: + + super().__init__(init_cfg=init_cfg) + + self.frozen_modules = frozen_modules + self.training_use_cache = training_use_cache + self.tokenizer = AutoTokenizer.from_pretrained(model_name) + clip_config = CLIPTextConfig.from_pretrained(model_name, + attention_dropout=dropout) + self.model = CLIPTP.from_pretrained(model_name, config=clip_config) + self._freeze_modules() + + def forward_tokenizer(self, 
texts): + if not hasattr(self, 'text'): + text = list(itertools.chain(*texts)) + text = self.tokenizer(text=text, return_tensors='pt', padding=True) + self.text = text.to(device=self.model.device) + return self.text + + def forward(self, text: List[List[str]]) -> Tensor: + num_per_batch = [len(t) for t in text] + assert max(num_per_batch) == min(num_per_batch), ( + 'number of sequences not equal in batch') + text = list(itertools.chain(*text)) + text = self.tokenizer(text=text, return_tensors='pt', padding=True) + text = text.to(device=self.model.device) + txt_outputs = self.model(**text) + txt_feats = txt_outputs.text_embeds + txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True) + txt_feats = txt_feats.reshape(-1, num_per_batch[0], + txt_feats.shape[-1]) + return txt_feats + + def _freeze_modules(self): + + if len(self.frozen_modules) == 0: + # not freeze + return + if self.frozen_modules[0] == "all": + self.model.eval() + for _, module in self.model.named_modules(): + module.eval() + for param in module.parameters(): + param.requires_grad = False + return + for name, module in self.model.named_modules(): + for frozen_name in self.frozen_modules: + if name.startswith(frozen_name): + module.eval() + for param in module.parameters(): + param.requires_grad = False + break + + def train(self, mode=True): + super().train(mode) + self._freeze_modules() + + +@MODELS.register_module() +class PseudoLanguageBackbone(BaseModule): + """Pseudo Language Backbone + Args: + text_embed_path (str): path to the text embedding file + """ + + def __init__(self, + text_embed_path: str = "", + test_embed_path: str = None, + init_cfg: OptMultiConfig = None): + super().__init__(init_cfg) + # {text:embed} + self.text_embed = torch.load(text_embed_path, map_location='cpu') + if test_embed_path is None: + self.test_embed = self.text_embed + else: + self.test_embed = torch.load(test_embed_path) + self.register_buffer("buff", torch.zeros([ + 1, + ])) + + def forward_cache(self, text: List[List[str]]) -> Tensor: + if not hasattr(self, "cache"): + self.cache = self.forward_text(text) + return self.cache + + def forward(self, text: List[List[str]]) -> Tensor: + if self.training: + return self.forward_text(text) + else: + return self.forward_cache(text) + + def forward_text(self, text: List[List[str]]) -> Tensor: + num_per_batch = [len(t) for t in text] + assert max(num_per_batch) == min(num_per_batch), ( + 'number of sequences not equal in batch') + text = list(itertools.chain(*text)) + if self.training: + text_embed_dict = self.text_embed + else: + text_embed_dict = self.test_embed + text_embeds = torch.stack( + [text_embed_dict[x.split("/")[0]] for x in text]) + # requires no grad and force to float + text_embeds = text_embeds.to( + self.buff.device).requires_grad_(False).float() + text_embeds = text_embeds.reshape(-1, num_per_batch[0], + text_embeds.shape[-1]) + return text_embeds + + +@MODELS.register_module() +class MultiModalYOLOBackbone(BaseModule): + + def __init__(self, + image_model: ConfigType, + text_model: ConfigType, + frozen_stages: int = -1, + with_text_model: bool = True, + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg) + self.with_text_model = with_text_model + self.image_model = MODELS.build(image_model) + if self.with_text_model: + self.text_model = MODELS.build(text_model) + else: + self.text_model = None + self.frozen_stages = frozen_stages + self._freeze_stages() + + def _freeze_stages(self): + """Freeze the parameters of the specified stage so that they are no 
+ longer updated.""" + if self.frozen_stages >= 0: + for i in range(self.frozen_stages + 1): + m = getattr(self.image_model, self.image_model.layers[i]) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode: bool = True): + """Convert the model into training mode while keep normalization layer + frozen.""" + super().train(mode) + self._freeze_stages() + + def forward(self, image: Tensor, + text: List[List[str]]) -> Tuple[Tuple[Tensor], Tensor]: + img_feats = self.image_model(image) + if self.with_text_model: + txt_feats = self.text_model(text) + return img_feats, txt_feats + else: + return img_feats, None + + def forward_text(self, text: List[List[str]]) -> Tensor: + assert self.with_text_model, "forward_text() requires a text model" + txt_feats = self.text_model(text) + return txt_feats + + def forward_image(self, image: Tensor) -> Tuple[Tensor]: + return self.image_model(image) diff --git a/models/YOLO-World/yolo_world/models/data_preprocessors/__init__.py b/models/YOLO-World/yolo_world/models/data_preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3959ac60693349c2ecd1a659aa0ca32c00c7eae --- /dev/null +++ b/models/YOLO-World/yolo_world/models/data_preprocessors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .data_preprocessor import YOLOWDetDataPreprocessor + +__all__ = ['YOLOWDetDataPreprocessor'] diff --git a/models/YOLO-World/yolo_world/models/data_preprocessors/data_preprocessor.py b/models/YOLO-World/yolo_world/models/data_preprocessors/data_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..58787063c8da3cd654c6e33eb81919a106273ab9 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/data_preprocessors/data_preprocessor.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +import torch +from mmdet.models.data_preprocessors import DetDataPreprocessor +from mmengine.structures import BaseDataElement + +from mmyolo.registry import MODELS + +CastData = Union[tuple, dict, BaseDataElement, torch.Tensor, list, bytes, str, + None] + + +@MODELS.register_module() +class YOLOWDetDataPreprocessor(DetDataPreprocessor): + """Rewrite collate_fn to get faster training speed. + + Note: It must be used together with `mmyolo.datasets.utils.yolow_collate` + """ + + def __init__(self, *args, non_blocking: Optional[bool] = True, **kwargs): + super().__init__(*args, non_blocking=non_blocking, **kwargs) + + def forward(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding and bgr2rgb conversion based on + ``DetDataPreprocessorr``. + + Args: + data (dict): Data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + dict: Data in the same format as the model input. + """ + if not training: + return super().forward(data, training) + + data = self.cast_data(data) + inputs, data_samples = data['inputs'], data['data_samples'] + assert isinstance(data['data_samples'], dict) + + # TODO: Supports multi-scale training + if self._channel_conversion and inputs.shape[1] == 3: + inputs = inputs[:, [2, 1, 0], ...] 
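+ # Editorial note (added comment, not in the upstream file): the slice above
+ # flips BGR to RGB by reindexing the channel axis; a standalone equivalent,
+ # assuming a (B, 3, H, W) tensor `bgr`, would be `rgb = bgr[:, [2, 1, 0], ...]`.
+ # The branch below then standardises the batch as (inputs - mean) / std using
+ # the per-channel statistics registered by the parent DetDataPreprocessor.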
+ if self._enable_normalize: + inputs = (inputs - self.mean) / self.std + + if self.batch_augments is not None: + for batch_aug in self.batch_augments: + inputs, data_samples = batch_aug(inputs, data_samples) + + img_metas = [{'batch_input_shape': inputs.shape[2:]}] * len(inputs) + data_samples_output = { + 'bboxes_labels': data_samples['bboxes_labels'], + 'texts': data_samples['texts'], + 'img_metas': img_metas + } + if 'masks' in data_samples: + data_samples_output['masks'] = data_samples['masks'] + if 'is_detection' in data_samples: + data_samples_output['is_detection'] = data_samples['is_detection'] + + return {'inputs': inputs, 'data_samples': data_samples_output} diff --git a/models/YOLO-World/yolo_world/models/dense_heads/__init__.py b/models/YOLO-World/yolo_world/models/dense_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b6dc1be66225281d69b4a4bd739ac309db53e5a --- /dev/null +++ b/models/YOLO-World/yolo_world/models/dense_heads/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .yolo_world_head import YOLOWorldHead, YOLOWorldHeadModule, RepYOLOWorldHeadModule +from .yolo_world_seg_head import YOLOWorldSegHead, YOLOWorldSegHeadModule + +__all__ = [ + 'YOLOWorldHead', 'YOLOWorldHeadModule', 'YOLOWorldSegHead', + 'YOLOWorldSegHeadModule', 'RepYOLOWorldHeadModule' +] diff --git a/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_head.py b/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_head.py new file mode 100644 index 0000000000000000000000000000000000000000..45fde3e84f527e9f681ea3539900d1156df8086d --- /dev/null +++ b/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_head.py @@ -0,0 +1,734 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
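+# Editorial note (added comments, not part of the upstream file): this module
+# implements the YOLO-World detection head. ContrastiveHead below scores every
+# spatial location of the image feature map against the text embeddings; a
+# minimal sketch of the same similarity computation, assuming L2-normalised
+# image features x of shape (B, C, H, W) and text features w of shape (B, K, C),
+# is:
+#   logits = torch.einsum('bchw,bkc->bkhw', x, w) * logit_scale.exp() + bias
+# which yields one logit map per text prompt (K maps per image).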
+import math +import copy +from typing import List, Optional, Tuple, Union, Sequence +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from mmcv.cnn import ConvModule +from mmengine.config import ConfigDict +from mmengine.model import BaseModule +from torch import Tensor +from torch.nn.modules.batchnorm import _BatchNorm + +from mmengine.dist import get_dist_info +from mmengine.structures import InstanceData +from mmdet.structures import SampleList +from mmdet.utils import OptConfigType, InstanceList, OptInstanceList +from mmdet.models.utils import (multi_apply, unpack_gt_instances, + filter_scores_and_topk) +from mmyolo.registry import MODELS +from mmyolo.models.dense_heads import YOLOv8HeadModule, YOLOv8Head +from mmyolo.models.utils import gt_instances_preprocess +from mmcv.cnn.bricks import build_norm_layer + + +@MODELS.register_module() +class ContrastiveHead(BaseModule): + """Contrastive Head for YOLO-World + compute the region-text scores according to the + similarity between image and text features + Args: + embed_dims (int): embed dim of text and image features + """ + + def __init__(self, + embed_dims: int, + init_cfg: OptConfigType = None, + use_einsum: bool = True) -> None: + + super().__init__(init_cfg=init_cfg) + + self.bias = nn.Parameter(torch.zeros([])) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + self.use_einsum = use_einsum + + def forward(self, x: Tensor, w: Tensor) -> Tensor: + """Forward function of contrastive learning.""" + x = F.normalize(x, dim=1, p=2) + w = F.normalize(w, dim=-1, p=2) + + if self.use_einsum: + x = torch.einsum('bchw,bkc->bkhw', x, w) + else: + batch, channel, height, width = x.shape + _, k, _ = w.shape + x = x.permute(0, 2, 3, 1) # bchw->bhwc + x = x.reshape(batch, -1, channel) # bhwc->b(hw)c + w = w.permute(0, 2, 1) # bkc->bck + x = torch.matmul(x, w) + x = x.reshape(batch, height, width, k) + x = x.permute(0, 3, 1, 2) + + x = x * self.logit_scale.exp() + self.bias + return x + + +@MODELS.register_module() +class BNContrastiveHead(BaseModule): + """ Batch Norm Contrastive Head for YOLO-World + using batch norm instead of l2-normalization + Args: + embed_dims (int): embed dim of text and image features + norm_cfg (dict): normalization params + """ + + def __init__(self, + embed_dims: int, + norm_cfg: ConfigDict, + init_cfg: OptConfigType = None, + use_einsum: bool = True) -> None: + + super().__init__(init_cfg=init_cfg) + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + self.bias = nn.Parameter(torch.zeros([])) + # use -1.0 is more stable + self.logit_scale = nn.Parameter(-1.0 * torch.ones([])) + self.use_einsum = use_einsum + + def forward(self, x: Tensor, w: Tensor) -> Tensor: + """Forward function of contrastive learning.""" + x = self.norm(x) + w = F.normalize(w, dim=-1, p=2) + + if self.use_einsum: + x = torch.einsum('bchw,bkc->bkhw', x, w) + else: + batch, channel, height, width = x.shape + _, k, _ = w.shape + x = x.permute(0, 2, 3, 1) # bchw->bhwc + x = x.reshape(batch, -1, channel) # bhwc->b(hw)c + w = w.permute(0, 2, 1) # bkc->bck + x = torch.matmul(x, w) + x = x.reshape(batch, height, width, k) + x = x.permute(0, 3, 1, 2) + + x = x * self.logit_scale.exp() + self.bias + return x + + +@MODELS.register_module() +class RepBNContrastiveHead(BaseModule): + """ Batch Norm Contrastive Head for YOLO-World + using batch norm instead of l2-normalization + Args: + embed_dims (int): embed dim of text and image features + norm_cfg (dict): normalization params + """ + + def 
__init__(self, + embed_dims: int, + num_guide_embeds: int, + norm_cfg: ConfigDict, + init_cfg: OptConfigType = None) -> None: + + super().__init__(init_cfg=init_cfg) + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + self.conv = nn.Conv2d(embed_dims, num_guide_embeds, kernel_size=1) + + def forward(self, x: Tensor) -> Tensor: + """Forward function of contrastive learning.""" + x = self.norm(x) + x = self.conv(x) + return x + + +@MODELS.register_module() +class YOLOWorldHeadModule(YOLOv8HeadModule): + """Head Module for YOLO-World + + Args: + embed_dims (int): embed dim for text feautures and image features + use_bn_head (bool): use batch normalization head + """ + + def __init__(self, + *args, + embed_dims: int, + use_bn_head: bool = False, + use_einsum: bool = True, + freeze_all: bool = False, + **kwargs) -> None: + self.embed_dims = embed_dims + self.use_bn_head = use_bn_head + self.use_einsum = use_einsum + self.freeze_all = freeze_all + super().__init__(*args, **kwargs) + + def init_weights(self, prior_prob=0.01): + """Initialize the weight and bias of PPYOLOE head.""" + super().init_weights() + for cls_pred, cls_contrast, stride in zip(self.cls_preds, + self.cls_contrasts, + self.featmap_strides): + cls_pred[-1].bias.data[:] = 0.0 # reset bias + if hasattr(cls_contrast, 'bias'): + nn.init.constant_( + cls_contrast.bias.data, + math.log(5 / self.num_classes / (640 / stride)**2)) + + def _init_layers(self) -> None: + """initialize conv layers in YOLOv8 head.""" + # Init decouple head + self.cls_preds = nn.ModuleList() + self.reg_preds = nn.ModuleList() + self.cls_contrasts = nn.ModuleList() + + reg_out_channels = max( + (16, self.in_channels[0] // 4, self.reg_max * 4)) + cls_out_channels = max(self.in_channels[0], self.num_classes) + + for i in range(self.num_levels): + self.reg_preds.append( + nn.Sequential( + ConvModule(in_channels=self.in_channels[i], + out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule(in_channels=reg_out_channels, + out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(in_channels=reg_out_channels, + out_channels=4 * self.reg_max, + kernel_size=1))) + self.cls_preds.append( + nn.Sequential( + ConvModule(in_channels=self.in_channels[i], + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule(in_channels=cls_out_channels, + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(in_channels=cls_out_channels, + out_channels=self.embed_dims, + kernel_size=1))) + if self.use_bn_head: + self.cls_contrasts.append( + BNContrastiveHead(self.embed_dims, + self.norm_cfg, + use_einsum=self.use_einsum)) + else: + self.cls_contrasts.append( + ContrastiveHead(self.embed_dims, + use_einsum=self.use_einsum)) + + proj = torch.arange(self.reg_max, dtype=torch.float) + self.register_buffer('proj', proj, persistent=False) + + if self.freeze_all: + self._freeze_all() + + def _freeze_all(self): + """Freeze the model.""" + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super().train(mode) + if self.freeze_all: + self._freeze_all() + + def forward(self, img_feats: Tuple[Tensor], + txt_feats: Tensor) -> Tuple[List]: + """Forward features from 
the upstream network.""" + assert len(img_feats) == self.num_levels + txt_feats = [txt_feats for _ in range(self.num_levels)] + return multi_apply(self.forward_single, img_feats, txt_feats, + self.cls_preds, self.reg_preds, self.cls_contrasts) + + def forward_single(self, img_feat: Tensor, txt_feat: Tensor, + cls_pred: nn.ModuleList, reg_pred: nn.ModuleList, + cls_contrast: nn.ModuleList) -> Tuple: + """Forward feature of a single scale level.""" + b, _, h, w = img_feat.shape + cls_embed = cls_pred(img_feat) + cls_logit = cls_contrast(cls_embed, txt_feat) + bbox_dist_preds = reg_pred(img_feat) + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2) + + # TODO: The get_flops script cannot handle the situation of + # matmul, and needs to be fixed later + # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds + else: + return cls_logit, bbox_preds + +@MODELS.register_module() +class RepYOLOWorldHeadModule(YOLOWorldHeadModule): + + def __init__(self, + *args, + embed_dims: int, + num_guide: int, + freeze_all: bool = False, + **kwargs) -> None: + super().__init__(*args, + embed_dims=embed_dims, + use_bn_head=True, + use_einsum=False, + freeze_all=freeze_all, + **kwargs) + + # using rep head + cls_contrasts = [] + for _ in range(self.num_levels): + cls_contrasts.append( + RepBNContrastiveHead( + embed_dims=embed_dims, + num_guide_embeds=num_guide, + norm_cfg=self.norm_cfg + ) + ) + self.cls_contrasts = nn.ModuleList(cls_contrasts) + + def forward_single(self, img_feat: Tensor, cls_pred: nn.ModuleList, + reg_pred: nn.ModuleList, + cls_contrast: nn.ModuleList) -> Tuple: + """Forward features from the upstream network.""" + b, _, h, w = img_feat.shape + cls_embed = cls_pred(img_feat) + cls_logit = cls_contrast(cls_embed) + bbox_dist_preds = reg_pred(img_feat) + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2) + + # TODO: The get_flops script cannot handle the situation of + # matmul, and needs to be fixed later + # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds + else: + return cls_logit, bbox_preds + + def forward(self, img_feats: Tuple[Tensor]) -> Tuple[List]: + assert len(img_feats) == self.num_levels + return multi_apply(self.forward_single, img_feats, self.cls_preds, + self.reg_preds, self.cls_contrasts) + + +@MODELS.register_module() +class YOLOWorldHead(YOLOv8Head): + """YOLO-World Head + """ + + def __init__(self, world_size=-1, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.world_size = world_size + + """YOLO World v8 head.""" + + def loss(self, img_feats: Tuple[Tensor], txt_feats: Tensor, + batch_data_samples: Union[list, dict]) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network.""" + + outs = self(img_feats, txt_feats) + # Fast version + loss_inputs = outs + (batch_data_samples['bboxes_labels'], + batch_data_samples['img_metas']) + 
losses = self.loss_by_feat(*loss_inputs) + + return losses + + def loss_and_predict( + self, + img_feats: Tuple[Tensor], + txt_feats: Tensor, + batch_data_samples: SampleList, + proposal_cfg: Optional[ConfigDict] = None + ) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. + """ + outputs = unpack_gt_instances(batch_data_samples) + (batch_gt_instances, batch_gt_instances_ignore, + batch_img_metas) = outputs + + outs = self(img_feats, txt_feats) + + loss_inputs = outs + (batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat(*outs, + batch_img_metas=batch_img_metas, + cfg=proposal_cfg) + return losses, predictions + + def forward(self, img_feats: Tuple[Tensor], + txt_feats: Tensor) -> Tuple[List]: + """Forward features from the upstream network.""" + return self.head_module(img_feats, txt_feats) + + def predict(self, + img_feats: Tuple[Tensor], + txt_feats: Tensor, + batch_data_samples: SampleList, + rescale: bool = False) -> InstanceList: + """Perform forward propagation of the detection head and predict + detection results on the features of the upstream network. + """ + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + outs = self(img_feats, txt_feats) + predictions = self.predict_by_feat(*outs, + batch_img_metas=batch_img_metas, + rescale=rescale) + return predictions + + def aug_test(self, + aug_batch_feats, + aug_batch_img_metas, + rescale=False, + with_ori_nms=False, + **kwargs): + """Test function with test time augmentation.""" + raise NotImplementedError('aug_test is not implemented yet.') + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + bbox_dist_preds: Sequence[Tensor], + batch_gt_instances: Sequence[InstanceData], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + bbox_dist_preds (Sequence[Tensor]): Box distribution logits for + each scale level with shape (bs, reg_max + 1, H*W, 4). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. 
+ """ + num_imgs = len(batch_img_metas) + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat(mlvl_priors_with_stride, + dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + # (bs, n, 4 * reg_max) + flatten_pred_dists = [ + bbox_pred_org.reshape(num_imgs, -1, self.head_module.reg_max * 4) + for bbox_pred_org in bbox_dist_preds + ] + + flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1) + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[..., 0]) + + assigned_result = self.assigner( + (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype), + flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_bboxes = assigned_result['assigned_bboxes'] + assigned_scores = assigned_result['assigned_scores'] + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'] + + assigned_scores_sum = assigned_scores.sum().clamp(min=1) + + loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores).sum() + loss_cls /= assigned_scores_sum + + # rescale bbox + assigned_bboxes /= self.stride_tensor + flatten_pred_bboxes /= self.stride_tensor + + # select positive samples mask + num_pos = fg_mask_pre_prior.sum() + if num_pos > 0: + # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox + # will not report an error + # iou loss + prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4]) + pred_bboxes_pos = torch.masked_select( + flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4]) + assigned_bboxes_pos = torch.masked_select( + assigned_bboxes, prior_bbox_mask).reshape([-1, 4]) + bbox_weight = torch.masked_select(assigned_scores.sum(-1), + fg_mask_pre_prior).unsqueeze(-1) + loss_bbox = self.loss_bbox( + pred_bboxes_pos, assigned_bboxes_pos, + weight=bbox_weight) / assigned_scores_sum + + # dfl loss + pred_dist_pos = flatten_dist_preds[fg_mask_pre_prior] + assigned_ltrb = self.bbox_coder.encode( + self.flatten_priors_train[..., :2] / self.stride_tensor, + assigned_bboxes, + max_dis=self.head_module.reg_max - 1, + eps=0.01) + assigned_ltrb_pos = torch.masked_select( + assigned_ltrb, prior_bbox_mask).reshape([-1, 4]) + loss_dfl = self.loss_dfl(pred_dist_pos.reshape( + -1, self.head_module.reg_max), + assigned_ltrb_pos.reshape(-1), + weight=bbox_weight.expand(-1, + 4).reshape(-1), + avg_factor=assigned_scores_sum) + else: + loss_bbox = flatten_pred_bboxes.sum() * 0 + loss_dfl = 
flatten_pred_bboxes.sum() * 0 + if self.world_size == -1: + _, world_size = get_dist_info() + else: + world_size = self.world_size + return dict(loss_cls=loss_cls * num_imgs * world_size, + loss_bbox=loss_bbox * num_imgs * world_size, + loss_dfl=loss_dfl * num_imgs * world_size) + + def predict_by_feat(self, + cls_scores: List[Tensor], + bbox_preds: List[Tensor], + objectnesses: Optional[List[Tensor]] = None, + batch_img_metas: Optional[List[dict]] = None, + cfg: Optional[ConfigDict] = None, + rescale: bool = True, + with_nms: bool = True) -> List[InstanceData]: + """Transform a batch of output features extracted by the head into + bbox results. + Args: + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + objectnesses (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, 1, H, W). + batch_img_metas (list[dict], Optional): Batch image meta info. + Defaults to None. + cfg (ConfigDict, optional): Test / postprocessing + configuration, if None, test_cfg would be used. + Defaults to None. + rescale (bool): If True, return boxes in original image space. + Defaults to False. + with_nms (bool): If True, do nms before return boxes. + Defaults to True. + + Returns: + list[:obj:`InstanceData`]: Object detection results of each image + after the post process. Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + - bboxes (Tensor): Has a shape (num_instances, 4), + the last dimension 4 arrange as (x1, y1, x2, y2). 
+ """ + assert len(cls_scores) == len(bbox_preds) + if objectnesses is None: + with_objectnesses = False + else: + with_objectnesses = True + assert len(cls_scores) == len(objectnesses) + + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + + multi_label = cfg.multi_label + multi_label &= self.num_classes > 1 + cfg.multi_label = multi_label + + num_imgs = len(batch_img_metas) + featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] + + # If the shape does not change, use the previous mlvl_priors + if featmap_sizes != self.featmap_sizes: + self.mlvl_priors = self.prior_generator.grid_priors( + featmap_sizes, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device) + self.featmap_sizes = featmap_sizes + flatten_priors = torch.cat(self.mlvl_priors) + + mlvl_strides = [ + flatten_priors.new_full( + (featmap_size.numel() * self.num_base_priors, ), stride) for + featmap_size, stride in zip(featmap_sizes, self.featmap_strides) + ] + flatten_stride = torch.cat(mlvl_strides) + + # flatten cls_scores, bbox_preds and objectness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + + flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() + flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) + flatten_decoded_bboxes = self.bbox_coder.decode( + flatten_priors[None], flatten_bbox_preds, flatten_stride) + + if with_objectnesses: + flatten_objectness = [ + objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) + for objectness in objectnesses + ] + flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() + else: + flatten_objectness = [None for _ in range(num_imgs)] + # 8400 + # print(flatten_cls_scores.shape) + results_list = [] + for (bboxes, scores, objectness, + img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores, + flatten_objectness, batch_img_metas): + ori_shape = img_meta['ori_shape'] + scale_factor = img_meta['scale_factor'] + if 'pad_param' in img_meta: + pad_param = img_meta['pad_param'] + else: + pad_param = None + + score_thr = cfg.get('score_thr', -1) + # yolox_style does not require the following operations + if objectness is not None and score_thr > 0 and not cfg.get( + 'yolox_style', False): + conf_inds = objectness > score_thr + bboxes = bboxes[conf_inds, :] + scores = scores[conf_inds, :] + objectness = objectness[conf_inds] + + if objectness is not None: + # conf = obj_conf * cls_conf + scores *= objectness[:, None] + + if scores.shape[0] == 0: + empty_results = InstanceData() + empty_results.bboxes = bboxes + empty_results.scores = scores[:, 0] + empty_results.labels = scores[:, 0].int() + results_list.append(empty_results) + continue + + nms_pre = cfg.get('nms_pre', 100000) + if cfg.multi_label is False: + scores, labels = scores.max(1, keepdim=True) + scores, _, keep_idxs, results = filter_scores_and_topk( + scores, + score_thr, + nms_pre, + results=dict(labels=labels[:, 0])) + labels = results['labels'] + else: + scores, labels, keep_idxs, _ = filter_scores_and_topk( + scores, score_thr, nms_pre) + + results = InstanceData(scores=scores, + labels=labels, + bboxes=bboxes[keep_idxs]) + + if rescale: + if pad_param is not None: + results.bboxes -= results.bboxes.new_tensor([ + pad_param[2], pad_param[0], pad_param[2], pad_param[0] + ]) + results.bboxes /= results.bboxes.new_tensor( + scale_factor).repeat((1, 2)) + + if 
cfg.get('yolox_style', False): + # do not need max_per_img + cfg.max_per_img = len(results) + + results = self._bbox_post_process(results=results, + cfg=cfg, + rescale=False, + with_nms=with_nms, + img_meta=img_meta) + results.bboxes[:, 0::2].clamp_(0, ori_shape[1]) + results.bboxes[:, 1::2].clamp_(0, ori_shape[0]) + + results_list.append(results) + return results_list diff --git a/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_seg_head.py b/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_seg_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c79f1cc5561c1f550b1f89b94b6c7529c42e5619 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/dense_heads/yolo_world_seg_head.py @@ -0,0 +1,550 @@ +# Copyright (c) Lin Song. All rights reserved. +import math +from typing import List, Optional, Tuple, Union, Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import ConvModule +from mmengine.config import ConfigDict +from mmengine.dist import get_dist_info +from mmengine.structures import InstanceData +from mmdet.structures import SampleList +from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, + OptMultiConfig, InstanceList) +from mmdet.models.utils import multi_apply, unpack_gt_instances +from mmyolo.models.dense_heads import YOLOv8HeadModule +from mmyolo.models.utils import gt_instances_preprocess +from mmyolo.registry import MODELS, TASK_UTILS +from mmyolo.models.dense_heads.yolov5_ins_head import ( + ProtoModule, YOLOv5InsHead +) + +from .yolo_world_head import ContrastiveHead, BNContrastiveHead + + +@MODELS.register_module() +class YOLOWorldSegHeadModule(YOLOv8HeadModule): + def __init__(self, + *args, + embed_dims: int, + proto_channels: int, + mask_channels: int, + freeze_bbox: bool = False, + freeze_all: bool = False, + use_bn_head: bool = False, + **kwargs) -> None: + self.embed_dims = embed_dims + self.proto_channels = proto_channels + self.mask_channels = mask_channels + self.freeze_bbox = freeze_bbox + self.freeze_all = freeze_all + self.use_bn_head = use_bn_head + super().__init__(*args, **kwargs) + + def init_weights(self, prior_prob=0.01): + """Initialize the weight and bias of PPYOLOE head.""" + super().init_weights() + for cls_pred, cls_contrast, stride in zip(self.cls_preds, + self.cls_contrasts, + self.featmap_strides): + cls_pred[-1].bias.data[:] = 0.0 # reset bias + if hasattr(cls_contrast, 'bias'): + nn.init.constant_( + cls_contrast.bias.data, + math.log(5 / self.num_classes / (640 / stride)**2)) + + def _init_layers(self) -> None: + """initialize conv layers in YOLOv8 head.""" + # Init decouple head + self.cls_preds = nn.ModuleList() + self.reg_preds = nn.ModuleList() + self.seg_preds = nn.ModuleList() + self.cls_contrasts = nn.ModuleList() + + reg_out_channels = max( + (16, self.in_channels[0] // 4, self.reg_max * 4)) + seg_out_channels = max(self.in_channels[0] // 4, self.mask_channels) + cls_out_channels = max(self.in_channels[0], self.num_classes) + + bbox_norm_cfg = self.norm_cfg + bbox_norm_cfg['requires_grad'] = not self.freeze_bbox + if self.freeze_all: + self.norm_cfg['requires_grad'] = False + bbox_norm_cfg['requires_grad'] = False + + for i in range(self.num_levels): + self.reg_preds.append( + nn.Sequential( + ConvModule(in_channels=self.in_channels[i], + out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=bbox_norm_cfg, + act_cfg=self.act_cfg), + 
ConvModule(in_channels=reg_out_channels, + out_channels=reg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=bbox_norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(in_channels=reg_out_channels, + out_channels=4 * self.reg_max, + kernel_size=1))) + self.cls_preds.append( + nn.Sequential( + ConvModule(in_channels=self.in_channels[i], + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=bbox_norm_cfg, + act_cfg=self.act_cfg), + ConvModule(in_channels=cls_out_channels, + out_channels=cls_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=bbox_norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(in_channels=cls_out_channels, + out_channels=self.embed_dims, + kernel_size=1))) + self.seg_preds.append( + nn.Sequential( + ConvModule(in_channels=self.in_channels[i], + out_channels=seg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule(in_channels=seg_out_channels, + out_channels=seg_out_channels, + kernel_size=3, + stride=1, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d(in_channels=seg_out_channels, + out_channels=self.mask_channels, + kernel_size=1))) + + if self.use_bn_head: + self.cls_contrasts.append( + BNContrastiveHead(self.embed_dims, self.norm_cfg)) + else: + self.cls_contrasts.append(ContrastiveHead(self.embed_dims)) + + proj = torch.arange(self.reg_max, dtype=torch.float) + self.register_buffer('proj', proj, persistent=False) + + self.proto_pred = ProtoModule(in_channels=self.in_channels[0], + middle_channels=self.proto_channels, + mask_channels=self.mask_channels, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.freeze_bbox or self.freeze_bbox: + self._freeze_all() + + def _freeze_all(self): + frozen_list = [self.cls_preds, self.reg_preds, self.cls_contrasts] + if self.freeze_all: + frozen_list.extend([self.proto_pred, self.seg_preds]) + for module in frozen_list: + for m in module.modules(): + if isinstance(m, _BatchNorm): + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode: bool = True): + """Convert the model into training mode while keep normalization layer + frozen.""" + super().train(mode) + if self.freeze_bbox or self.freeze_all: + self._freeze_all() + + def forward(self, img_feats: Tuple[Tensor], + txt_feats: Tensor) -> Tuple[List]: + """Forward features from the upstream network.""" + assert len(img_feats) == self.num_levels + txt_feats = [txt_feats for _ in range(self.num_levels)] + mask_protos = self.proto_pred(img_feats[0]) + cls_logit, bbox_preds, bbox_dist_preds, coeff_preds = multi_apply( + self.forward_single, img_feats, txt_feats, self.cls_preds, + self.reg_preds, self.cls_contrasts, self.seg_preds) + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds, coeff_preds, mask_protos + else: + return cls_logit, bbox_preds, None, coeff_preds, mask_protos + + def forward_single(self, img_feat: Tensor, txt_feat: Tensor, + cls_pred: nn.ModuleList, reg_pred: nn.ModuleList, + cls_contrast: nn.ModuleList, + seg_pred: nn.ModuleList) -> Tuple: + """Forward feature of a single scale level.""" + b, _, h, w = img_feat.shape + cls_embed = cls_pred(img_feat) + cls_logit = cls_contrast(cls_embed, txt_feat) + bbox_dist_preds = reg_pred(img_feat) + coeff_pred = seg_pred(img_feat) + if self.reg_max > 1: + bbox_dist_preds = bbox_dist_preds.reshape( + [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2) + + # TODO: The get_flops script cannot handle the situation of + # matmul, 
and needs to be fixed later + # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj) + bbox_preds = bbox_dist_preds.softmax(3).matmul( + self.proj.view([-1, 1])).squeeze(-1) + bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w) + else: + bbox_preds = bbox_dist_preds + if self.training: + return cls_logit, bbox_preds, bbox_dist_preds, coeff_pred + else: + return cls_logit, bbox_preds, None, coeff_pred + + +@MODELS.register_module() +class YOLOWorldSegHead(YOLOv5InsHead): + def __init__(self, + head_module: ConfigType, + prior_generator: ConfigType = dict( + type='mmdet.MlvlPointGenerator', + offset=0.5, + strides=[8, 16, 32]), + bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), + loss_cls: ConfigType = dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none', + loss_weight=0.5), + loss_bbox: ConfigType = dict(type='IoULoss', + iou_mode='ciou', + bbox_format='xyxy', + reduction='sum', + loss_weight=7.5, + return_iou=False), + loss_dfl=dict(type='mmdet.DistributionFocalLoss', + reduction='mean', + loss_weight=1.5 / 4), + mask_overlap: bool = True, + loss_mask: ConfigType = dict(type='mmdet.CrossEntropyLoss', + use_sigmoid=True, + reduction='none'), + loss_mask_weight=0.05, + train_cfg: OptConfigType = None, + test_cfg: OptConfigType = None, + init_cfg: OptMultiConfig = None): + super().__init__(head_module=head_module, + prior_generator=prior_generator, + bbox_coder=bbox_coder, + loss_cls=loss_cls, + loss_bbox=loss_bbox, + train_cfg=train_cfg, + test_cfg=test_cfg, + init_cfg=init_cfg) + self.loss_dfl = MODELS.build(loss_dfl) + self.loss_obj = None + self.mask_overlap = mask_overlap + self.loss_mask: nn.Module = MODELS.build(loss_mask) + self.loss_mask_weight = loss_mask_weight + + def special_init(self): + """Since YOLO series algorithms will inherit from YOLOv5Head, but + different algorithms have special initialization process. + + The special_init function is designed to deal with this situation. + """ + if self.train_cfg: + self.assigner = TASK_UTILS.build(self.train_cfg.assigner) + # Add common attributes to reduce calculation + self.featmap_sizes_train = None + self.num_level_priors = None + self.flatten_priors_train = None + self.stride_tensor = None + + """YOLO World head.""" + + def loss(self, img_feats: Tuple[Tensor], txt_feats: Tensor, + batch_data_samples: Union[list, dict]) -> dict: + """Perform forward propagation and loss calculation of the detection + head on the features of the upstream network.""" + + outs = self(img_feats, txt_feats) + # Fast version + loss_inputs = outs + (batch_data_samples['bboxes_labels'], + batch_data_samples['masks'], + batch_data_samples['img_metas']) + losses = self.loss_by_feat(*loss_inputs) + + return losses + + def loss_and_predict( + self, + img_feats: Tuple[Tensor], + txt_feats: Tensor, + batch_data_samples: SampleList, + proposal_cfg: Optional[ConfigDict] = None + ) -> Tuple[dict, InstanceList]: + """Perform forward propagation of the head, then calculate loss and + predictions from the features and data samples. 
+ """ + outputs = unpack_gt_instances(batch_data_samples) + (batch_gt_instances, batch_gt_instances_ignore, + batch_img_metas) = outputs + + outs = self(img_feats, txt_feats) + + loss_inputs = outs + (batch_gt_instances, batch_img_metas, + batch_gt_instances_ignore) + losses = self.loss_by_feat(*loss_inputs) + + predictions = self.predict_by_feat(*outs, + batch_img_metas=batch_img_metas, + cfg=proposal_cfg) + return losses, predictions + + def forward(self, img_feats: Tuple[Tensor], + txt_feats: Tensor) -> Tuple[List]: + """Forward features from the upstream network.""" + return self.head_module(img_feats, txt_feats) + + def predict(self, + img_feats: Tuple[Tensor], + txt_feats: Tensor, + batch_data_samples: SampleList, + rescale: bool = False) -> InstanceList: + """Perform forward propagation of the detection head and predict + detection results on the features of the upstream network. + """ + batch_img_metas = [ + data_samples.metainfo for data_samples in batch_data_samples + ] + outs = self(img_feats, txt_feats) + predictions = self.predict_by_feat(*outs, + batch_img_metas=batch_img_metas, + rescale=rescale) + return predictions + + def aug_test(self, + aug_batch_feats, + aug_batch_img_metas, + rescale=False, + with_ori_nms=False, + **kwargs): + """Test function with test time augmentation.""" + raise NotImplementedError('aug_test is not implemented yet.') + + def loss_by_feat( + self, + cls_scores: Sequence[Tensor], + bbox_preds: Sequence[Tensor], + bbox_dist_preds: Sequence[Tensor], + coeff_preds: Sequence[Tensor], + proto_preds: Tensor, + batch_gt_instances: Sequence[InstanceData], + batch_gt_masks: Sequence[Tensor], + batch_img_metas: Sequence[dict], + batch_gt_instances_ignore: OptInstanceList = None) -> dict: + """Calculate the loss based on the features extracted by the detection + head. + + Args: + cls_scores (Sequence[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_priors * num_classes. + bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_priors * 4. + bbox_dist_preds (Sequence[Tensor]): Box distribution logits for + each scale level with shape (bs, reg_max + 1, H*W, 4). + batch_gt_instances (list[:obj:`InstanceData`]): Batch of + gt_instance. It usually includes ``bboxes`` and ``labels`` + attributes. + batch_img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): + Batch of gt_instances_ignore. It includes ``bboxes`` attribute + data that is ignored during training and testing. + Defaults to None. + Returns: + dict[str, Tensor]: A dictionary of losses. 
+ """ + num_imgs = len(batch_img_metas) + + current_featmap_sizes = [ + cls_score.shape[2:] for cls_score in cls_scores + ] + # If the shape does not equal, generate new one + if current_featmap_sizes != self.featmap_sizes_train: + self.featmap_sizes_train = current_featmap_sizes + + mlvl_priors_with_stride = self.prior_generator.grid_priors( + self.featmap_sizes_train, + dtype=cls_scores[0].dtype, + device=cls_scores[0].device, + with_stride=True) + + self.num_level_priors = [len(n) for n in mlvl_priors_with_stride] + self.flatten_priors_train = torch.cat(mlvl_priors_with_stride, + dim=0) + self.stride_tensor = self.flatten_priors_train[..., [2]] + + # gt info + gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs) + gt_labels = gt_info[:, :, :1] + gt_bboxes = gt_info[:, :, 1:] # xyxy + pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float() + + # pred info + flatten_cls_preds = [ + cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, + self.num_classes) + for cls_pred in cls_scores + ] + flatten_pred_bboxes = [ + bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) + for bbox_pred in bbox_preds + ] + # (bs, n, 4 * reg_max) + flatten_pred_dists = [ + bbox_pred_org.reshape(num_imgs, -1, self.head_module.reg_max * 4) + for bbox_pred_org in bbox_dist_preds + ] + + flatten_pred_coeffs = [ + coeff_pred.permute(0, 2, 3, + 1).reshape(num_imgs, -1, + self.head_module.mask_channels) + for coeff_pred in coeff_preds + ] + + flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1) + flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) + flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1) + flatten_pred_bboxes = self.bbox_coder.decode( + self.flatten_priors_train[..., :2], flatten_pred_bboxes, + self.stride_tensor[..., 0]) + flatten_pred_coeffs = torch.cat(flatten_pred_coeffs, dim=1) + + assigned_result = self.assigner( + (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype), + flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train, + gt_labels, gt_bboxes, pad_bbox_flag) + + assigned_bboxes = assigned_result['assigned_bboxes'] + assigned_scores = assigned_result['assigned_scores'] + fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'] + assigned_gt_idxs = assigned_result['assigned_gt_idxs'] + + assigned_scores_sum = assigned_scores.sum().clamp(min=1) + + loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores).sum() + loss_cls /= assigned_scores_sum + + # rescale bbox + assigned_bboxes /= self.stride_tensor + flatten_pred_bboxes /= self.stride_tensor + + # select positive samples mask + num_pos = fg_mask_pre_prior.sum() + if num_pos > 0: + # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox + # will not report an error + # iou loss + prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4]) + pred_bboxes_pos = torch.masked_select( + flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4]) + assigned_bboxes_pos = torch.masked_select( + assigned_bboxes, prior_bbox_mask).reshape([-1, 4]) + bbox_weight = torch.masked_select(assigned_scores.sum(-1), + fg_mask_pre_prior).unsqueeze(-1) + loss_bbox = self.loss_bbox( + pred_bboxes_pos, assigned_bboxes_pos, + weight=bbox_weight) / assigned_scores_sum + + # dfl loss + pred_dist_pos = flatten_dist_preds[fg_mask_pre_prior] + assigned_ltrb = self.bbox_coder.encode( + self.flatten_priors_train[..., :2] / self.stride_tensor, + assigned_bboxes, + max_dis=self.head_module.reg_max - 1, + eps=0.01) + assigned_ltrb_pos = torch.masked_select( + assigned_ltrb, prior_bbox_mask).reshape([-1, 4]) + loss_dfl = 
self.loss_dfl(pred_dist_pos.reshape( + -1, self.head_module.reg_max), + assigned_ltrb_pos.reshape(-1), + weight=bbox_weight.expand(-1, + 4).reshape(-1), + avg_factor=assigned_scores_sum) + + _, c, mask_h, mask_w = proto_preds.shape + if batch_gt_masks.shape[-2:] != (mask_h, mask_w): + batch_gt_masks = F.interpolate(batch_gt_masks[None], + (mask_h, mask_w), + mode='nearest')[0] + + loss_mask = torch.zeros(1, device=loss_dfl.device) + box_sum_flag = pad_bbox_flag.long().sum(dim=1).squeeze(1) + + batch_inds = torch.zeros(num_imgs, + dtype=torch.int64, + device=assigned_gt_idxs.device)[:, None] + batch_inds[1:] = box_sum_flag.cumsum(dim=0)[:-1][..., None] + _assigned_gt_idxs = assigned_gt_idxs + batch_inds + + for bs in range(num_imgs): + # 8400 + bbox_match_inds = assigned_gt_idxs[bs] + mask_match_inds = _assigned_gt_idxs[bs] + + bbox_match_inds = torch.masked_select(bbox_match_inds, + fg_mask_pre_prior[bs]) + mask_match_inds = torch.masked_select(mask_match_inds, + fg_mask_pre_prior[bs]) + + # mask + mask_dim = coeff_preds[0].shape[1] + prior_mask_mask = fg_mask_pre_prior[bs].unsqueeze(-1).repeat( + [1, mask_dim]) + pred_coeffs_pos = torch.masked_select(flatten_pred_coeffs[bs], + prior_mask_mask).reshape( + [-1, mask_dim]) + + match_boxes = gt_bboxes[bs][bbox_match_inds] / 4 + normed_boxes = gt_bboxes[bs][bbox_match_inds] / 640 + + bbox_area = (normed_boxes[:, 2:] - + normed_boxes[:, :2]).prod(dim=1) + if not mask_match_inds.any(): + continue + assert not self.mask_overlap + mask_gti = batch_gt_masks[mask_match_inds] + mask_preds = ( + pred_coeffs_pos @ proto_preds[bs].view(c, -1)).view( + -1, mask_h, mask_w) + loss_mask_full = self.loss_mask(mask_preds, mask_gti) + _loss_mask = (self.crop_mask(loss_mask_full[None], + match_boxes).mean(dim=(2, 3)) / + bbox_area) + + loss_mask += _loss_mask.mean() + + else: + loss_bbox = flatten_pred_bboxes.sum() * 0 + loss_dfl = flatten_pred_bboxes.sum() * 0 + loss_mask = flatten_pred_coeffs.sum() * 0 + _, world_size = get_dist_info() + + return dict(loss_cls=loss_cls * num_imgs * world_size, + loss_bbox=loss_bbox * num_imgs * world_size, + loss_dfl=loss_dfl * num_imgs * world_size, + loss_mask=loss_mask * self.loss_mask_weight * world_size) diff --git a/models/YOLO-World/yolo_world/models/detectors/__init__.py b/models/YOLO-World/yolo_world/models/detectors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..991a2e4284135d4706865a4e9371b83989a4183e --- /dev/null +++ b/models/YOLO-World/yolo_world/models/detectors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .yolo_world import YOLOWorldDetector, SimpleYOLOWorldDetector + +__all__ = ['YOLOWorldDetector', 'SimpleYOLOWorldDetector'] diff --git a/models/YOLO-World/yolo_world/models/detectors/yolo_world.py b/models/YOLO-World/yolo_world/models/detectors/yolo_world.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4b09103dabade430e0c13b28e94b509ebb552e --- /dev/null +++ b/models/YOLO-World/yolo_world/models/detectors/yolo_world.py @@ -0,0 +1,231 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
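+# Editorial note (added comments, not part of the upstream file): this module
+# defines the YOLO-World detectors. YOLOWorldDetector.reparameterize() caches
+# the text embeddings so that later predict()/_forward() calls can run with
+# image inputs only; a hypothetical usage sketch (the config and builder names
+# are assumptions, not taken from this repository) would be:
+#   detector = MODELS.build(cfg.model)
+#   detector.reparameterize([['chair', 'table', 'sofa']])
+#   results = detector.predict(batch_inputs, batch_data_samples)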
+from typing import List, Tuple, Union +import torch +import torch.nn as nn +from torch import Tensor +from mmdet.structures import OptSampleList, SampleList +from mmyolo.models.detectors import YOLODetector +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class YOLOWorldDetector(YOLODetector): + """Implementation of YOLOW Series""" + + def __init__(self, + *args, + mm_neck: bool = False, + num_train_classes=80, + num_test_classes=80, + **kwargs) -> None: + self.mm_neck = mm_neck + self.num_train_classes = num_train_classes + self.num_test_classes = num_test_classes + super().__init__(*args, **kwargs) + + def loss(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Union[dict, list]: + """Calculate losses from a batch of inputs and data samples.""" + self.bbox_head.num_classes = self.num_train_classes + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + losses = self.bbox_head.loss(img_feats, txt_feats, batch_data_samples) + return losses + + def predict(self, + batch_inputs: Tensor, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + """ + + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + + # self.bbox_head.num_classes = self.num_test_classes + self.bbox_head.num_classes = txt_feats[0].shape[0] + results_list = self.bbox_head.predict(img_feats, + txt_feats, + batch_data_samples, + rescale=rescale) + + batch_data_samples = self.add_pred_to_datasample( + batch_data_samples, results_list) + return batch_data_samples + + def reparameterize(self, texts: List[List[str]]) -> None: + # encode text embeddings into the detector + self.texts = texts + self.text_feats = self.backbone.forward_text(texts) + + def _forward( + self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. 
+ """ + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + results = self.bbox_head.forward(img_feats, txt_feats) + return results + + def extract_feat( + self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Tuple[Tuple[Tensor], Tensor]: + """Extract features.""" + txt_feats = None + if batch_data_samples is None: + texts = self.texts + txt_feats = self.text_feats + elif isinstance(batch_data_samples, + dict) and 'texts' in batch_data_samples: + texts = batch_data_samples['texts'] + elif isinstance(batch_data_samples, list) and hasattr( + batch_data_samples[0], 'texts'): + texts = [data_sample.texts for data_sample in batch_data_samples] + elif hasattr(self, 'text_feats'): + texts = self.texts + txt_feats = self.text_feats + else: + raise TypeError('batch_data_samples should be dict or list.') + if txt_feats is not None: + # forward image only + img_feats = self.backbone.forward_image(batch_inputs) + else: + img_feats, txt_feats = self.backbone(batch_inputs, texts) + if self.with_neck: + if self.mm_neck: + img_feats = self.neck(img_feats, txt_feats) + else: + img_feats = self.neck(img_feats) + return img_feats, txt_feats + + +@MODELS.register_module() +class SimpleYOLOWorldDetector(YOLODetector): + """Implementation of YOLO World Series""" + + def __init__(self, + *args, + mm_neck: bool = False, + num_train_classes=80, + num_test_classes=80, + prompt_dim=512, + num_prompts=80, + embedding_path='', + reparameterized=False, + freeze_prompt=False, + use_mlp_adapter=False, + **kwargs) -> None: + self.mm_neck = mm_neck + self.num_training_classes = num_train_classes + self.num_test_classes = num_test_classes + self.prompt_dim = prompt_dim + self.num_prompts = num_prompts + self.reparameterized = reparameterized + self.freeze_prompt = freeze_prompt + self.use_mlp_adapter = use_mlp_adapter + super().__init__(*args, **kwargs) + + if not self.reparameterized: + if len(embedding_path) > 0: + import numpy as np + self.embeddings = torch.nn.Parameter( + torch.from_numpy(np.load(embedding_path)).float()) + else: + # random init + embeddings = nn.functional.normalize(torch.randn( + (num_prompts, prompt_dim)), + dim=-1) + self.embeddings = nn.Parameter(embeddings) + + if self.freeze_prompt: + self.embeddings.requires_grad = False + else: + self.embeddings.requires_grad = True + + if use_mlp_adapter: + self.adapter = nn.Sequential( + nn.Linear(prompt_dim, prompt_dim * 2), nn.ReLU(True), + nn.Linear(prompt_dim * 2, prompt_dim)) + else: + self.adapter = None + + def loss(self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Union[dict, list]: + """Calculate losses from a batch of inputs and data samples.""" + self.bbox_head.num_classes = self.num_training_classes + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + if self.reparameterized: + losses = self.bbox_head.loss(img_feats, batch_data_samples) + else: + losses = self.bbox_head.loss(img_feats, txt_feats, + batch_data_samples) + return losses + + def predict(self, + batch_inputs: Tensor, + batch_data_samples: SampleList, + rescale: bool = True) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. 
+ """ + + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + + self.bbox_head.num_classes = self.num_test_classes + if self.reparameterized: + results_list = self.bbox_head.predict(img_feats, + batch_data_samples, + rescale=rescale) + else: + results_list = self.bbox_head.predict(img_feats, + txt_feats, + batch_data_samples, + rescale=rescale) + + batch_data_samples = self.add_pred_to_datasample( + batch_data_samples, results_list) + return batch_data_samples + + def _forward( + self, + batch_inputs: Tensor, + batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + """ + img_feats, txt_feats = self.extract_feat(batch_inputs, + batch_data_samples) + if self.reparameterized: + results = self.bbox_head.forward(img_feats) + else: + results = self.bbox_head.forward(img_feats, txt_feats) + return results + + def extract_feat( + self, batch_inputs: Tensor, + batch_data_samples: SampleList) -> Tuple[Tuple[Tensor], Tensor]: + """Extract features.""" + # only image features + img_feats, _ = self.backbone(batch_inputs, None) + + if not self.reparameterized: + # use embeddings + txt_feats = self.embeddings[None] + if self.adapter is not None: + txt_feats = self.adapter(txt_feats) + txt_feats + txt_feats = nn.functional.normalize(txt_feats, dim=-1, p=2) + txt_feats = txt_feats.repeat(img_feats[0].shape[0], 1, 1) + else: + txt_feats = None + if self.with_neck: + if self.mm_neck: + img_feats = self.neck(img_feats, txt_feats) + else: + img_feats = self.neck(img_feats) + return img_feats, txt_feats diff --git a/models/YOLO-World/yolo_world/models/layers/__init__.py b/models/YOLO-World/yolo_world/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..830dc3bd45a7bd3a214b1c40f80b8d55fabc2195 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/layers/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) Tencent Inc. All rights reserved. +# Basic brick modules for PAFPN based on CSPLayers + +from .yolo_bricks import ( + CSPLayerWithTwoConv, + MaxSigmoidAttnBlock, + MaxSigmoidCSPLayerWithTwoConv, + ImagePoolingAttentionModule, + RepConvMaxSigmoidCSPLayerWithTwoConv, + RepMaxSigmoidCSPLayerWithTwoConv + ) + +__all__ = ['CSPLayerWithTwoConv', + 'MaxSigmoidAttnBlock', + 'MaxSigmoidCSPLayerWithTwoConv', + 'RepConvMaxSigmoidCSPLayerWithTwoConv', + 'RepMaxSigmoidCSPLayerWithTwoConv', + 'ImagePoolingAttentionModule'] diff --git a/models/YOLO-World/yolo_world/models/layers/yolo_bricks.py b/models/YOLO-World/yolo_world/models/layers/yolo_bricks.py new file mode 100644 index 0000000000000000000000000000000000000000..0c39131cfda2de942bfd3fa9f894870b8664f377 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/layers/yolo_bricks.py @@ -0,0 +1,601 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
+from typing import List + +import torch +import torch.nn as nn +from torch import Tensor +import torch.nn.functional as F +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, Linear +from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig +from mmengine.model import BaseModule +from mmyolo.registry import MODELS +from mmyolo.models.layers import CSPLayerWithTwoConv + + +@MODELS.register_module() +class MaxSigmoidAttnBlock(BaseModule): + """Max Sigmoid attention block.""" + + def __init__(self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + kernel_size: int = 3, + padding: int = 1, + num_heads: int = 1, + use_depthwise: bool = False, + with_scale: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(init_cfg=init_cfg) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + assert (out_channels % num_heads == 0 and + embed_channels % num_heads == 0), \ + 'out_channels and embed_channels should be divisible by num_heads.' + self.num_heads = num_heads + self.head_channels = out_channels // num_heads + self.use_einsum = use_einsum + + self.embed_conv = ConvModule( + in_channels, + embed_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) if embed_channels != in_channels else None + self.guide_fc = Linear(guide_channels, embed_channels) + self.bias = nn.Parameter(torch.zeros(num_heads)) + if with_scale: + self.scale = nn.Parameter(torch.ones(1, num_heads, 1, 1)) + else: + self.scale = 1.0 + + self.project_conv = conv(in_channels, + out_channels, + kernel_size, + stride=1, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + B, _, H, W = x.shape + + guide = self.guide_fc(guide) + guide = guide.reshape(B, -1, self.num_heads, self.head_channels) + embed = self.embed_conv(x) if self.embed_conv is not None else x + embed = embed.reshape(B, self.num_heads, self.head_channels, H, W) + + if self.use_einsum: + attn_weight = torch.einsum('bmchw,bnmc->bmhwn', embed, guide) + else: + batch, m, channel, height, width = embed.shape + _, n, _, _ = guide.shape + embed = embed.permute(0, 1, 3, 4, 2) + embed = embed.reshape(batch, m, -1, channel) + guide = guide.permute(0, 2, 3, 1) + attn_weight = torch.matmul(embed, guide) + attn_weight = attn_weight.reshape(batch, m, height, width, n) + + attn_weight = attn_weight.max(dim=-1)[0] + attn_weight = attn_weight / (self.head_channels**0.5) + attn_weight = attn_weight + self.bias[None, :, None, None] + attn_weight = attn_weight.sigmoid() * self.scale + + x = self.project_conv(x) + x = x.reshape(B, self.num_heads, -1, H, W) + x = x * attn_weight.unsqueeze(2) + x = x.reshape(B, -1, H, W) + return x + + +@MODELS.register_module() +class RepMatrixMaxSigmoidAttnBlock(BaseModule): + """Max Sigmoid attention block.""" + + def __init__(self, + in_channels: int, + out_channels: int, + embed_channels: int, + guide_channels: int, + kernel_size: int = 3, + padding: int = 1, + num_heads: int = 1, + use_depthwise: bool = False, + with_scale: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(init_cfg=init_cfg) + conv = DepthwiseSeparableConvModule if 
use_depthwise else ConvModule + + assert (out_channels % num_heads == 0 and + embed_channels % num_heads == 0), \ + 'out_channels and embed_channels should be divisible by num_heads.' + self.num_heads = num_heads + self.head_channels = out_channels // num_heads + self.use_einsum = use_einsum + + self.embed_conv = ConvModule( + in_channels, + embed_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) if embed_channels != in_channels else None + self.bias = nn.Parameter(torch.zeros(num_heads)) + self.guide_weight = nn.Parameter( + torch.zeros(guide_channels, embed_channels // num_heads, + num_heads)) + self.project_conv = conv(in_channels, + out_channels, + kernel_size, + stride=1, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x: Tensor, txt_feats: Tensor = None) -> Tensor: + """Forward process.""" + B, _, H, W = x.shape + + embed = self.embed_conv(x) if self.embed_conv is not None else x + embed = embed.reshape(B, self.num_heads, self.head_channels, H, W) + + batch, m, channel, height, width = embed.shape + _, n, _, _ = self.guide_weight.shape + # can be formulated to split conv + embed = embed.permute(0, 1, 3, 4, 2) + embed = embed.reshape(batch, m, -1, channel) + attn_weight = torch.matmul(embed, self.guide_weight) + attn_weight = attn_weight.reshape(batch, m, height, width, n) + + attn_weight = attn_weight.max(dim=-1)[0] + attn_weight = attn_weight / (self.head_channels**0.5) + attn_weight = attn_weight + self.bias[None, :, None, None] + attn_weight = attn_weight.sigmoid() + + x = self.project_conv(x) + x = x.reshape(B, self.num_heads, -1, H, W) + x = x * attn_weight.unsqueeze(2) + x = x.reshape(B, -1, H, W) + return x + + +@MODELS.register_module() +class RepConvMaxSigmoidAttnBlock(BaseModule): + """Max Sigmoid attention block.""" + + def __init__(self, + in_channels: int, + out_channels: int, + embed_channels: int, + guide_channels: int, + kernel_size: int = 3, + padding: int = 1, + num_heads: int = 1, + use_depthwise: bool = False, + with_scale: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(init_cfg=init_cfg) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + assert (out_channels % num_heads == 0 and + embed_channels % num_heads == 0), \ + 'out_channels and embed_channels should be divisible by num_heads.' 
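+        # In this "RepConv" variant the guidance is held in fixed per-head
+        # 1x1 convolutions (`guide_convs` below); `forward` accepts
+        # `txt_feats` only for interface compatibility and does not use it.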
+ self.num_heads = num_heads + self.head_channels = out_channels // num_heads + self.use_einsum = use_einsum + + self.embed_conv = ConvModule( + in_channels, + embed_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) if embed_channels != in_channels else None + self.bias = nn.Parameter(torch.zeros(num_heads)) + self.num_heads = num_heads + self.split_channels = embed_channels // num_heads + self.guide_convs = nn.ModuleList( + nn.Conv2d(self.split_channels, guide_channels, 1, bias=False) + for _ in range(num_heads)) + self.project_conv = conv(in_channels, + out_channels, + kernel_size, + stride=1, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x: Tensor, txt_feats: Tensor = None) -> Tensor: + """Forward process.""" + B, C, H, W = x.shape + + embed = self.embed_conv(x) if self.embed_conv is not None else x + embed = list(embed.split(self.split_channels, 1)) + # Bx(MxN)xHxW (H*c=C, H: heads) + attn_weight = torch.cat( + [conv(x) for conv, x in zip(self.guide_convs, embed)], dim=1) + # BxMxNxHxW + attn_weight = attn_weight.view(B, self.num_heads, -1, H, W) + # attn_weight = torch.stack( + # [conv(x) for conv, x in zip(self.guide_convs, embed)]) + # BxMxNxHxW -> BxMxHxW + attn_weight = attn_weight.max(dim=2)[0] / (self.head_channels**0.5) + attn_weight = (attn_weight + self.bias.view(1, -1, 1, 1)).sigmoid() + # .transpose(0, 1) + # BxMx1xHxW + attn_weight = attn_weight[:, :, None] + x = self.project_conv(x) + # BxHxCxHxW + x = x.view(B, self.num_heads, -1, H, W) + x = x * attn_weight + x = x.view(B, -1, H, W) + return x + + +@MODELS.register_module() +class MaxSigmoidCSPLayerWithTwoConv(CSPLayerWithTwoConv): + """Sigmoid-attention based CSP layer with two convolution layers.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + num_heads: int = 1, + expand_ratio: float = 0.5, + num_blocks: int = 1, + with_scale: bool = False, + add_identity: bool = True, # shortcut + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(in_channels=in_channels, + out_channels=out_channels, + expand_ratio=expand_ratio, + num_blocks=num_blocks, + add_identity=add_identity, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + self.final_conv = ConvModule((3 + num_blocks) * self.mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.attn_block = MaxSigmoidAttnBlock(self.mid_channels, + self.mid_channels, + guide_channels=guide_channels, + embed_channels=embed_channels, + num_heads=num_heads, + with_scale=with_scale, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + use_einsum=use_einsum) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + x_main = self.main_conv(x) + x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1)) + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.append(self.attn_block(x_main[-1], guide)) + return self.final_conv(torch.cat(x_main, 1)) + + +@MODELS.register_module() +class RepMaxSigmoidCSPLayerWithTwoConv(CSPLayerWithTwoConv): + """Sigmoid-attention based CSP layer with two convolution layers.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + num_heads: 
int = 1, + expand_ratio: float = 0.5, + num_blocks: int = 1, + with_scale: bool = False, + add_identity: bool = True, # shortcut + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(in_channels=in_channels, + out_channels=out_channels, + expand_ratio=expand_ratio, + num_blocks=num_blocks, + add_identity=add_identity, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + self.final_conv = ConvModule((3 + num_blocks) * self.mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.attn_block = RepMatrixMaxSigmoidAttnBlock( + self.mid_channels, + self.mid_channels, + embed_channels=embed_channels, + guide_channels=guide_channels, + num_heads=num_heads, + with_scale=with_scale, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + use_einsum=use_einsum) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + x_main = self.main_conv(x) + x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1)) + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.append(self.attn_block(x_main[-1], guide)) + return self.final_conv(torch.cat(x_main, 1)) + + +@MODELS.register_module() +class RepConvMaxSigmoidCSPLayerWithTwoConv(CSPLayerWithTwoConv): + """Sigmoid-attention based CSP layer with two convolution layers.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + num_heads: int = 1, + expand_ratio: float = 0.5, + num_blocks: int = 1, + with_scale: bool = False, + add_identity: bool = True, # shortcut + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None, + use_einsum: bool = True) -> None: + super().__init__(in_channels=in_channels, + out_channels=out_channels, + expand_ratio=expand_ratio, + num_blocks=num_blocks, + add_identity=add_identity, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + self.final_conv = ConvModule((3 + num_blocks) * self.mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.attn_block = RepConvMaxSigmoidAttnBlock( + self.mid_channels, + self.mid_channels, + embed_channels=embed_channels, + guide_channels=guide_channels, + num_heads=num_heads, + with_scale=with_scale, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + use_einsum=use_einsum) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + x_main = self.main_conv(x) + x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1)) + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.append(self.attn_block(x_main[-1], guide)) + return self.final_conv(torch.cat(x_main, 1)) + + +@MODELS.register_module() +class ImagePoolingAttentionModule(nn.Module): + + def __init__(self, + image_channels: List[int], + text_channels: int, + embed_channels: int, + with_scale: bool = False, + num_feats: int = 3, + num_heads: int = 8, + pool_size: int = 3, + use_einsum: bool = True): + super().__init__() + + self.text_channels = text_channels + self.embed_channels = embed_channels + self.num_heads = num_heads + self.num_feats = num_feats + self.head_channels = embed_channels // num_heads + 
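+        # Each of the `num_feats` image feature maps is projected to
+        # `embed_channels` and max-pooled to a `pool_size` x `pool_size`
+        # grid; the text features attend to these pooled tokens (see forward).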
self.pool_size = pool_size + self.use_einsum = use_einsum + if with_scale: + self.scale = nn.Parameter(torch.tensor([0.]), requires_grad=True) + else: + self.scale = 1.0 + self.projections = nn.ModuleList([ + ConvModule(in_channels, embed_channels, 1, act_cfg=None) + for in_channels in image_channels + ]) + self.query = nn.Sequential(nn.LayerNorm(text_channels), + Linear(text_channels, embed_channels)) + self.key = nn.Sequential(nn.LayerNorm(embed_channels), + Linear(embed_channels, embed_channels)) + self.value = nn.Sequential(nn.LayerNorm(embed_channels), + Linear(embed_channels, embed_channels)) + self.proj = Linear(embed_channels, text_channels) + + self.image_pools = nn.ModuleList([ + nn.AdaptiveMaxPool2d((pool_size, pool_size)) + for _ in range(num_feats) + ]) + + def forward(self, text_features, image_features): + B = image_features[0].shape[0] + assert len(image_features) == self.num_feats + num_patches = self.pool_size**2 + mlvl_image_features = [ + pool(proj(x)).view(B, -1, num_patches) + for (x, proj, pool + ) in zip(image_features, self.projections, self.image_pools) + ] + mlvl_image_features = torch.cat(mlvl_image_features, + dim=-1).transpose(1, 2) + q = self.query(text_features) + k = self.key(mlvl_image_features) + v = self.value(mlvl_image_features) + + q = q.reshape(B, -1, self.num_heads, self.head_channels) + k = k.reshape(B, -1, self.num_heads, self.head_channels) + v = v.reshape(B, -1, self.num_heads, self.head_channels) + if self.use_einsum: + attn_weight = torch.einsum('bnmc,bkmc->bmnk', q, k) + else: + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 3, 1) + attn_weight = torch.matmul(q, k) + + attn_weight = attn_weight / (self.head_channels**0.5) + attn_weight = F.softmax(attn_weight, dim=-1) + if self.use_einsum: + x = torch.einsum('bmnk,bkmc->bnmc', attn_weight, v) + else: + v = v.permute(0, 2, 1, 3) + x = torch.matmul(attn_weight, v) + x = x.permute(0, 2, 1, 3) + x = self.proj(x.reshape(B, -1, self.embed_channels)) + return x * self.scale + text_features + + +@MODELS.register_module() +class VanillaSigmoidBlock(BaseModule): + """Sigmoid attention block.""" + + def __init__(self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + kernel_size: int = 3, + padding: int = 1, + num_heads: int = 1, + use_depthwise: bool = False, + with_scale: bool = False, + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(init_cfg=init_cfg) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + + assert (out_channels % num_heads == 0 and + embed_channels % num_heads == 0), \ + 'out_channels and embed_channels should be divisible by num_heads.' 
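+        # Despite the class name, the sigmoid gating is disabled in `forward`
+        # below and the `guide` input is unused; the block reduces to a plain
+        # projection convolution.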
+ self.num_heads = num_heads + self.head_channels = out_channels // num_heads + + self.project_conv = conv(in_channels, + out_channels, + kernel_size, + stride=1, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + x = self.project_conv(x) + # remove sigmoid + # x = x * x.sigmoid() + return x + + +@MODELS.register_module() +class EfficientCSPLayerWithTwoConv(CSPLayerWithTwoConv): + """Sigmoid-attention based CSP layer with two convolution layers.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + guide_channels: int, + embed_channels: int, + num_heads: int = 1, + expand_ratio: float = 0.5, + num_blocks: int = 1, + with_scale: bool = False, + add_identity: bool = True, # shortcut + conv_cfg: OptConfigType = None, + norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None) -> None: + super().__init__(in_channels=in_channels, + out_channels=out_channels, + expand_ratio=expand_ratio, + num_blocks=num_blocks, + add_identity=add_identity, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + self.final_conv = ConvModule((3 + num_blocks) * self.mid_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.attn_block = VanillaSigmoidBlock(self.mid_channels, + self.mid_channels, + guide_channels=guide_channels, + embed_channels=embed_channels, + num_heads=num_heads, + with_scale=with_scale, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + def forward(self, x: Tensor, guide: Tensor) -> Tensor: + """Forward process.""" + x_main = self.main_conv(x) + x_main = list(x_main.split((self.mid_channels, self.mid_channels), 1)) + x_main.extend(blocks(x_main[-1]) for blocks in self.blocks) + x_main.append(self.attn_block(x_main[-1], guide)) + return self.final_conv(torch.cat(x_main, 1)) diff --git a/models/YOLO-World/yolo_world/models/losses/__init__.py b/models/YOLO-World/yolo_world/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8423e30498fa69a08b9d66b492261cbfdec9e4f3 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/losses/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .dynamic_loss import CoVMSELoss + +__all__ = ['CoVMSELoss'] diff --git a/models/YOLO-World/yolo_world/models/losses/dynamic_loss.py b/models/YOLO-World/yolo_world/models/losses/dynamic_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..30d56c3afd8ef22867bf5cba919e4a2844577688 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/losses/dynamic_loss.py @@ -0,0 +1,38 @@ +# Copyright (c) Tencent Inc. All rights reserved. 
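The attention bricks above all build on the same max-sigmoid gate: per-head image features are scored against every guide embedding, the strongest response is kept, and a sigmoid turns it into a spatial gate. A minimal sketch with toy shapes (illustrative values; the learned bias and optional scale are omitted):

```
import torch

# Toy shapes: batch B=2, heads M=4, head channels C=32, an 8x8 feature map,
# and N=80 guide (text) embeddings.
B, M, C, H, W, N = 2, 4, 32, 8, 8, 80
embed = torch.randn(B, M, C, H, W)   # image features split into heads
guide = torch.randn(B, N, M, C)      # projected guide embeddings

# Per-pixel similarity with every guide embedding, one map per head ...
attn = torch.einsum('bmchw,bnmc->bmhwn', embed, guide)
# ... keep the best-matching guide, rescale, and squash into a (0, 1) gate.
gate = (attn.max(dim=-1)[0] / C**0.5).sigmoid()
print(gate.shape)                    # torch.Size([2, 4, 8, 8])
```

`MaxSigmoidAttnBlock.forward` multiplies this gate head-wise into the projected image features before the CSP layers concatenate the result.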
+from typing import Optional + +import torch +import torch.nn as nn +from torch import Tensor +from mmdet.models.losses.mse_loss import mse_loss +from mmyolo.registry import MODELS + + +@MODELS.register_module() +class CoVMSELoss(nn.Module): + + def __init__(self, + dim: int = 0, + reduction: str = 'mean', + loss_weight: float = 1.0, + eps: float = 1e-6) -> None: + super().__init__() + self.dim = dim + self.reduction = reduction + self.loss_weight = loss_weight + self.eps = eps + + def forward(self, + pred: Tensor, + weight: Optional[Tensor] = None, + avg_factor: Optional[int] = None, + reduction_override: Optional[str] = None) -> Tensor: + """Forward function of loss.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + cov = pred.std(self.dim) / pred.mean(self.dim).clamp(min=self.eps) + target = torch.zeros_like(cov) + loss = self.loss_weight * mse_loss( + cov, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss diff --git a/models/YOLO-World/yolo_world/models/necks/__init__.py b/models/YOLO-World/yolo_world/models/necks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d5219c66b5805bcb5e54bc0506dd19c4d8753ab --- /dev/null +++ b/models/YOLO-World/yolo_world/models/necks/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from .yolo_world_pafpn import YOLOWorldPAFPN, YOLOWorldDualPAFPN + +__all__ = ['YOLOWorldPAFPN', 'YOLOWorldDualPAFPN'] diff --git a/models/YOLO-World/yolo_world/models/necks/yolo_world_pafpn.py b/models/YOLO-World/yolo_world/models/necks/yolo_world_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..35d04cd5b5b13abecf23718ad6532bf7050b00f6 --- /dev/null +++ b/models/YOLO-World/yolo_world/models/necks/yolo_world_pafpn.py @@ -0,0 +1,235 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import copy +from typing import List, Union + +import torch +import torch.nn as nn +from torch import Tensor +from mmdet.utils import ConfigType, OptMultiConfig + +from mmyolo.registry import MODELS +from mmyolo.models.utils import make_divisible, make_round +from mmyolo.models.necks.yolov8_pafpn import YOLOv8PAFPN + + +@MODELS.register_module() +class YOLOWorldPAFPN(YOLOv8PAFPN): + """Path Aggregation Network used in YOLO World + Following YOLOv8 PAFPN, including text to image fusion + """ + def __init__(self, + in_channels: List[int], + out_channels: Union[List[int], int], + guide_channels: int, + embed_channels: List[int], + num_heads: List[int], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 3, + freeze_all: bool = False, + block_cfg: ConfigType = dict(type='CSPLayerWithTwoConv'), + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + init_cfg: OptMultiConfig = None) -> None: + self.guide_channels = guide_channels + self.embed_channels = embed_channels + self.num_heads = num_heads + self.block_cfg = block_cfg + super().__init__(in_channels=in_channels, + out_channels=out_channels, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + def build_top_down_layer(self, idx: int) -> nn.Module: + """build top down layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The top down layer. 
+ """ + block_cfg = copy.deepcopy(self.block_cfg) + block_cfg.update( + dict(in_channels=make_divisible( + (self.in_channels[idx - 1] + self.in_channels[idx]), + self.widen_factor), + out_channels=make_divisible(self.out_channels[idx - 1], + self.widen_factor), + guide_channels=self.guide_channels, + embed_channels=make_round(self.embed_channels[idx - 1], + self.widen_factor), + num_heads=make_round(self.num_heads[idx - 1], + self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, + self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return MODELS.build(block_cfg) + + def build_bottom_up_layer(self, idx: int) -> nn.Module: + """build bottom up layer. + + Args: + idx (int): layer idx. + + Returns: + nn.Module: The bottom up layer. + """ + block_cfg = copy.deepcopy(self.block_cfg) + block_cfg.update( + dict(in_channels=make_divisible( + (self.out_channels[idx] + self.out_channels[idx + 1]), + self.widen_factor), + out_channels=make_divisible(self.out_channels[idx + 1], + self.widen_factor), + guide_channels=self.guide_channels, + embed_channels=make_round(self.embed_channels[idx + 1], + self.widen_factor), + num_heads=make_round(self.num_heads[idx + 1], + self.widen_factor), + num_blocks=make_round(self.num_csp_blocks, + self.deepen_factor), + add_identity=False, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + return MODELS.build(block_cfg) + + def forward(self, img_feats: List[Tensor], txt_feats: Tensor = None) -> tuple: + """Forward function. + including multi-level image features, text features: BxLxD + """ + assert len(img_feats) == len(self.in_channels) + # reduce layers + reduce_outs = [] + for idx in range(len(self.in_channels)): + reduce_outs.append(self.reduce_layers[idx](img_feats[idx])) + + # top-down path + inner_outs = [reduce_outs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_low = reduce_outs[idx - 1] + upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - + idx](feat_high) + if self.upsample_feats_cat_first: + top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1) + else: + top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1) + inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx]( + top_down_layer_inputs, txt_feats) + inner_outs.insert(0, inner_out) + + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsample_layers[idx](feat_low) + out = self.bottom_up_layers[idx](torch.cat( + [downsample_feat, feat_high], 1), txt_feats) + outs.append(out) + + # out_layers + results = [] + for idx in range(len(self.in_channels)): + results.append(self.out_layers[idx](outs[idx])) + + return tuple(results) + + +@MODELS.register_module() +class YOLOWorldDualPAFPN(YOLOWorldPAFPN): + """Path Aggregation Network used in YOLO World v8.""" + def __init__(self, + in_channels: List[int], + out_channels: Union[List[int], int], + guide_channels: int, + embed_channels: List[int], + num_heads: List[int], + deepen_factor: float = 1.0, + widen_factor: float = 1.0, + num_csp_blocks: int = 3, + freeze_all: bool = False, + text_enhancder: ConfigType = dict( + type='ImagePoolingAttentionModule', + embed_channels=256, + num_heads=8, + pool_size=3), + block_cfg: ConfigType = dict(type='CSPLayerWithTwoConv'), + norm_cfg: ConfigType = dict(type='BN', + momentum=0.03, + eps=0.001), + act_cfg: ConfigType = dict(type='SiLU', inplace=True), + 
init_cfg: OptMultiConfig = None) -> None: + super().__init__(in_channels=in_channels, + out_channels=out_channels, + guide_channels=guide_channels, + embed_channels=embed_channels, + num_heads=num_heads, + deepen_factor=deepen_factor, + widen_factor=widen_factor, + num_csp_blocks=num_csp_blocks, + freeze_all=freeze_all, + block_cfg=block_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + + text_enhancder.update( + dict( + image_channels=[int(x * widen_factor) for x in out_channels], + text_channels=guide_channels, + num_feats=len(out_channels), + )) + print(text_enhancder) + self.text_enhancer = MODELS.build(text_enhancder) + + def forward(self, img_feats: List[Tensor], txt_feats: Tensor) -> tuple: + """Forward function.""" + assert len(img_feats) == len(self.in_channels) + # reduce layers + reduce_outs = [] + for idx in range(len(self.in_channels)): + reduce_outs.append(self.reduce_layers[idx](img_feats[idx])) + + # top-down path + inner_outs = [reduce_outs[-1]] + for idx in range(len(self.in_channels) - 1, 0, -1): + feat_high = inner_outs[0] + feat_low = reduce_outs[idx - 1] + upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - + idx](feat_high) + if self.upsample_feats_cat_first: + top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1) + else: + top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1) + inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx]( + top_down_layer_inputs, txt_feats) + inner_outs.insert(0, inner_out) + + txt_feats = self.text_enhancer(txt_feats, inner_outs) + # bottom-up path + outs = [inner_outs[0]] + for idx in range(len(self.in_channels) - 1): + feat_low = outs[-1] + feat_high = inner_outs[idx + 1] + downsample_feat = self.downsample_layers[idx](feat_low) + out = self.bottom_up_layers[idx](torch.cat( + [downsample_feat, feat_high], 1), txt_feats) + outs.append(out) + + # out_layers + results = [] + for idx in range(len(self.in_channels)): + results.append(self.out_layers[idx](outs[idx])) + + return tuple(results) diff --git a/models/YOLO-World/yolo_world/version.py b/models/YOLO-World/yolo_world/version.py new file mode 100644 index 0000000000000000000000000000000000000000..8f340586751e4b37195b1bbd1ee2a5b237561ee2 --- /dev/null +++ b/models/YOLO-World/yolo_world/version.py @@ -0,0 +1,23 @@ +# Copyright (c) Tencent Inc. All rights reserved. +from yolo_world import __version__ + +def __version_info() -> tuple: + """Parse a version string into a tuple. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
+ """ + version_info = [] + for x in __version__.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = __version_info() + +__all__ = ['__version__', 'version_info'] diff --git a/pretrained/config.yaml b/pretrained/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c4f9e5d087bd625a214d2292d3f5f466e5bd9d87 --- /dev/null +++ b/pretrained/config.yaml @@ -0,0 +1,20 @@ +openyolo3d: + frequency: 10 + vis_depth_threshold: 0.05 + depth_scale: 1000.0 + topk: 25 + topk_per_image: -1 + +network2d: + text_prompts: ["chair"] + topk: 100 + th: 0.08 + nms: 0.3 + use_amp: False + pretrained_path: "pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth" + config_path: "pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py" + +network3d: + pretrained_path: "pretrained/checkpoints/scannet200_val.ckpt" + th: 0.04 + nms: 0.6 \ No newline at end of file diff --git a/pretrained/config_replica.yaml b/pretrained/config_replica.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5609d9cdf754db9f9066d0d59f9cb66683236125 --- /dev/null +++ b/pretrained/config_replica.yaml @@ -0,0 +1,21 @@ +openyolo3d: + frequency: 1 + vis_depth_threshold: 0.4 + depth_scale: 6553.5 + topk: 40 + topk_per_image: -1 + +network2d: + text_prompts: ["basket", "bed", "bench", "bin", "blanket", "blinds", "book", "bottle", "box", "bowl", "camera", "cabinet", "candle", "chair", "clock", "cloth", "comforter", "cushion", "desk", "desk-organizer", "door", "indoor-plant", "lamp", "monitor", "nightstand", "panel", "picture", "pillar", "pillow", "pipe", "plant-stand", "plate", "pot", "sculpture", "shelf", "sofa", "stool", "switch", "table", "tablet", "tissue-paper", "tv-screen", "tv-stand", "vase", "vent", "wall-plug", "window", "rug"] + topk: 100 + th: 0.1 + nms: 0.3 + use_amp: False + pretrained_path: "pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth" + config_path: "pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py" + +network3d: + pretrained_path: "pretrained/checkpoints/scannet200_val.ckpt" + th: 0.02 + nms: 0.1 + is_gt: False diff --git a/pretrained/config_scannet200.yaml b/pretrained/config_scannet200.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da67eed0c095984ec830840ebb3b5498743cb68e --- /dev/null +++ b/pretrained/config_scannet200.yaml @@ -0,0 +1,21 @@ +openyolo3d: + frequency: 10 + vis_depth_threshold: 0.05 + depth_scale: 1000.0 + topk: 40 + topk_per_image: 600 + +network2d: + text_prompts: ['chair', 'table', 'door', 'couch', 'cabinet', 'shelf', 'desk', 'office chair', 'bed', 'pillow', 'sink', 'picture', 'window', 'toilet', 'bookshelf', 'monitor', 'curtain', 'book', 'armchair', 'coffee table', 'box', 'refrigerator', 'lamp', 'kitchen cabinet', 'towel', 'clothes', 'tv', 'nightstand', 'counter', 'dresser', 'stool', 'cushion', 'plant', 'ceiling', 'bathtub', 'end table', 'dining table', 'keyboard', 'bag', 'backpack', 'toilet paper', 'printer', 'tv stand', 'whiteboard', 'blanket', 'shower curtain', 'trash can', 'closet', 'stairs', 'microwave', 'stove', 'shoe', 'computer tower', 'bottle', 'bin', 'ottoman', 'bench', 'board', 'washing machine', 'mirror', 'copier', 'basket', 'sofa chair', 'file 
cabinet', 'fan', 'laptop', 'shower', 'paper', 'person', 'paper towel dispenser', 'oven', 'blinds', 'rack', 'plate', 'blackboard', 'piano', 'suitcase', 'rail', 'radiator', 'recycling bin', 'container', 'wardrobe', 'soap dispenser', 'telephone', 'bucket', 'clock', 'stand', 'light', 'laundry basket', 'pipe', 'clothes dryer', 'guitar', 'toilet paper holder', 'seat', 'speaker', 'column', 'bicycle', 'ladder', 'bathroom stall', 'shower wall', 'cup', 'jacket', 'storage bin', 'coffee maker', 'dishwasher', 'paper towel roll', 'machine', 'mat', 'windowsill', 'bar', 'toaster', 'bulletin board', 'ironing board', 'fireplace', 'soap dish', 'kitchen counter', 'doorframe', 'toilet paper dispenser', 'mini fridge', 'fire extinguisher', 'ball', 'hat', 'shower curtain rod', 'water cooler', 'paper cutter', 'tray', 'shower door', 'pillar', 'ledge', 'toaster oven', 'mouse', 'toilet seat cover dispenser', 'furniture', 'cart', 'storage container', 'scale', 'tissue box', 'light switch', 'crate', 'power outlet', 'decoration', 'sign', 'projector', 'closet door', 'vacuum cleaner', 'candle', 'plunger', 'stuffed animal', 'headphones', 'dish rack', 'broom', 'guitar case', 'range hood', 'dustpan', 'hair dryer', 'water bottle', 'handicap bar', 'purse', 'vent', 'shower floor', 'water pitcher', 'mailbox', 'bowl', 'paper bag', 'alarm clock', 'music stand', 'projector screen', 'divider', 'laundry detergent', 'bathroom counter', 'object', 'bathroom vanity', 'closet wall', 'laundry hamper', 'bathroom stall door', 'ceiling light', 'trash bin', 'dumbbell', 'stair rail', 'tube', 'bathroom cabinet', 'cd case', 'closet rod', 'coffee kettle', 'structure', 'shower head', 'keyboard piano', 'case of water bottles', 'coat rack', 'storage organizer', 'folded chair', 'fire alarm', 'power strip', 'calendar', 'poster', 'potted plant', 'luggage', 'mattress'] + topk: 100 + th: 0.08 + nms: 0.3 + use_amp: False + pretrained_path: "pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth" + config_path: "pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py" + +network3d: + pretrained_path: "pretrained/checkpoints/scannet200_val.ckpt" + th: 0.04 + nms: 0.6 + is_gt: False \ No newline at end of file diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..2bfc3179deb965e6d62f6ccff4abc12647f298ec --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,171 @@ +import os +_base_ = (f'{os.getcwd()}/models/YOLO-World/third_party/mmyolo/configs/yolov8/' + 'yolov8_x_syncbn_fast_8xb16-500e_coco.py') +custom_imports = dict(imports=['yolo_world'], + allow_failed_imports=False) + +# hyper-parameters +num_classes = 1203 +num_training_classes = 80 +max_epochs = 100 # Maximum training epochs +close_mosaic_epochs = 2 +save_epoch_intervals = 2 +text_channels = 512 +neck_embed_channels = [128, 256, _base_.last_stage_out_channels // 2] +neck_num_heads = [4, 8, _base_.last_stage_out_channels // 2 // 32] +base_lr = 2e-3 +weight_decay = 0.05 / 2 +train_batch_size_per_gpu = 16 + +# model settings +model = dict( + type='YOLOWorldDetector', + mm_neck=True, + num_train_classes=num_training_classes, + num_test_classes=num_classes, + data_preprocessor=dict(type='YOLOWDetDataPreprocessor'), + 
backbone=dict( + _delete_=True, + type='MultiModalYOLOBackbone', + image_model={{_base_.model.backbone}}, + text_model=dict( + type='HuggingCLIPLanguageBackbone', + model_name='openai/clip-vit-base-patch32', + frozen_modules=['all'])), + neck=dict(type='YOLOWorldPAFPN', + guide_channels=text_channels, + embed_channels=neck_embed_channels, + num_heads=neck_num_heads, + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv')), + bbox_head=dict(type='YOLOWorldHead', + head_module=dict(type='YOLOWorldHeadModule', + use_bn_head=True, + embed_dims=text_channels, + num_classes=num_training_classes)), + train_cfg=dict(assigner=dict(num_classes=num_training_classes))) + +# dataset settings +text_transform = [ + dict(type='RandomLoadText', + num_neg_samples=(num_classes, num_classes), + max_num_samples=num_training_classes, + padding_to_max=True, + padding_value=''), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'texts')) +] +train_pipeline = [ + *_base_.pre_transform, + dict(type='MultiModalMosaic', + img_scale=_base_.img_scale, + pad_val=114.0, + pre_transform=_base_.pre_transform), + dict( + type='YOLOv5RandomAffine', + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale), + max_aspect_ratio=_base_.max_aspect_ratio, + border=(-_base_.img_scale[0] // 2, -_base_.img_scale[1] // 2), + border_val=(114, 114, 114)), + *_base_.last_transform[:-1], + *text_transform, +] +train_pipeline_stage2 = [*_base_.train_pipeline_stage2[:-1], *text_transform] +obj365v1_train_dataset = dict( + type='MultiModalDataset', + dataset=dict( + type='YOLOv5Objects365V1Dataset', + data_root='data/objects365v1/', + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32)), + class_text_path='data/texts/obj365v1_class_texts.json', + pipeline=train_pipeline) + +mg_train_dataset = dict(type='YOLOv5MixedGroundingDataset', + data_root='data/mixed_grounding/', + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=train_pipeline) + +flickr_train_dataset = dict( + type='YOLOv5MixedGroundingDataset', + data_root='data/flickr/', + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline) + +train_dataloader = dict(batch_size=train_batch_size_per_gpu, + collate_fn=dict(type='yolow_collate'), + dataset=dict(_delete_=True, + type='ConcatDataset', + datasets=[ + obj365v1_train_dataset, + flickr_train_dataset, mg_train_dataset + ], + ignore_keys=['classes', 'palette'])) + +test_pipeline = [ + *_base_.test_pipeline[:-1], + dict(type='LoadText'), + dict(type='mmdet.PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'pad_param', 'texts')) +] +coco_val_dataset = dict( + _delete_=True, + type='MultiModalDataset', + dataset=dict(type='YOLOv5LVISV1Dataset', + data_root='data/coco/', + test_mode=True, + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + data_prefix=dict(img=''), + batch_shapes_cfg=None), + class_text_path='data/texts/lvis_v1_class_texts.json', + pipeline=test_pipeline) +val_dataloader = dict(dataset=coco_val_dataset) +test_dataloader = val_dataloader + +val_evaluator = dict(type='mmdet.LVISMetric', + 
ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox') +test_evaluator = val_evaluator + +# training settings +default_hooks = dict(param_scheduler=dict(max_epochs=max_epochs), + checkpoint=dict(interval=save_epoch_intervals, + rule='greater')) +custom_hooks = [ + dict(type='EMAHook', + ema_type='ExpMomentumEMA', + momentum=0.0001, + update_buffers=True, + strict_load=False, + priority=49), + dict(type='mmdet.PipelineSwitchHook', + switch_epoch=max_epochs - close_mosaic_epochs, + switch_pipeline=train_pipeline_stage2) +] +train_cfg = dict(max_epochs=max_epochs, + val_interval=10, + dynamic_intervals=[((max_epochs - close_mosaic_epochs), + _base_.val_interval_stage2)]) +optim_wrapper = dict(optimizer=dict( + _delete_=True, + type='AdamW', + lr=base_lr, + weight_decay=weight_decay, + batch_size_per_gpu=train_batch_size_per_gpu), + paramwise_cfg=dict(bias_decay_mult=0.0, + norm_decay_mult=0.0, + custom_keys={ + 'backbone.text_model': + dict(lr_mult=0.01), + 'logit_scale': + dict(weight_decay=0.0) + }), + constructor='YOLOWv5OptimizerConstructor') diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134026/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134026/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134026/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + 
strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, 
type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, 
type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + 
datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134152/vis_data/config.py 
b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134152/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134152/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 
'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 
100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + 
num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = 
dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + 
num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + 
type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134433/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134433/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134433/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, 
type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, 
+ pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + 
num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + 
ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + 
dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + 
], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + 
dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 
'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134559/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134559/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134559/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), 
+ dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] 
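# --------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the auto-generated dump):
# this file is a full mmengine dump of the YOLO-World-v2-X detector settings
# shipped with Open-YOLO 3D's pretrained checkpoints, which the method uses
# as its 2D open-vocabulary detector. A minimal way to inspect such a dump,
# assuming mmengine is installed as per the Installation guide:
#
#     from mmengine.config import Config
#
#     cfg = Config.fromfile(
#         'pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_'
#         'obj365v1_goldg_train_lvis_minival/20240530_134559/vis_data/config.py')
#     print(cfg.model.type)                # 'YOLOWorldDetector'
#     print(cfg.model.num_test_classes)    # 1203 (LVIS class texts)
#     print(cfg.model.test_cfg.score_thr)  # 0.001
#
# Building the detector itself additionally requires mmyolo (the default
# scope) and the yolo_world package listed under custom_imports below.
# --------------------------------------------------------------------------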
+last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + 
loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + 
padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + 
filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + 
meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134830/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134830/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ 
b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_134830/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 
'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + 
type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + 
test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 
'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + 
dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', 
+ 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135416/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135416/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135416/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 
114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) 
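+# Evaluation setup: the val/test dataloaders above wrap a YOLOv5LVISV1Dataset
+# (LVIS v1 minival annotations stored under data/coco/) in a MultiModalDataset
+# that injects the LVIS class prompts from data/texts/lvis_v1_class_texts.json;
+# the LVIS evaluator below scores box predictions against the same annotation
+# file. A minimal inspection sketch, assuming only that mmengine is installed
+# (the path is illustrative, not a fixed location in this repo):
+#
+#   from mmengine.config import Config
+#   cfg = Config.fromfile('path/to/this/config.py')
+#   print(cfg.val_dataloader.dataset.class_text_path)        # LVIS prompt file
+#   print(cfg.val_evaluator.type, cfg.val_evaluator.metric)  # mmdet.LVISMetric bbox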
+val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135651/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135651/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_135651/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', 
img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + 
offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') 
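+# Optimization setup: the wrapper below uses AdamW (lr=0.002, weight_decay=0.025)
+# with gradient clipping at max_norm=10; per custom_keys, parameters under
+# backbone.text_model (the frozen CLIP text encoder) get lr_mult=0.01, and
+# logit_scale is excluded from weight decay, as are biases and norm layers.
+# A minimal sketch of building the training loop from this dump, assuming
+# mmyolo and the yolo_world plugin (see custom_imports) are importable so the
+# custom constructor, datasets and model classes are registered:
+#
+#   from mmengine.config import Config
+#   from mmengine.runner import Runner
+#   cfg = Config.fromfile('path/to/this/config.py')
+#   runner = Runner.from_cfg(cfg)   # builds model, optim_wrapper and dataloaders
+#   # runner.train()                # runs the 100-epoch schedule in this config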
+optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + 
pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', 
+ ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') 
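+# Test-time augmentation: the pipeline below resizes each image to the three
+# scales in img_scales (640/320/960 letterboxes) and runs each with and without
+# a horizontal flip (six views); DetTTAModel above merges the per-view boxes
+# with NMS at IoU 0.65, keeping at most 300 detections per image. A hedged
+# usage sketch; the --tta switch is the usual MMDetection/MMYOLO 3.x test-script
+# entry point, and the file names here are placeholders:
+#
+#   python tools/test.py path/to/this/config.py path/to/checkpoint.pth --tta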
+tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140039/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140039/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140039/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + 
dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + 
scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + 
type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + 
pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', 
+ filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' 
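+# Evaluation setup summarized from the keys that follow: the LVIS v1 minival split
+# (lvis_v1_minival_inserted_image_name.json, 1203 categories) is loaded through
+# YOLOv5LVISV1Dataset wrapped in a MultiModalDataset, the category names from
+# data/texts/lvis_v1_class_texts.json are injected as text prompts via LoadText,
+# and detections are scored with mmdet.LVISMetric (bbox mAP).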
+val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140423/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140423/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140423/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + 
allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + 
type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + 
-320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( 
+ 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + 
max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + 
name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140609/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140609/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_140609/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + 
dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + 
act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': 
dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + 
type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141113/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141113/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141113/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + 
pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + 
cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 
'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), 
+ data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141414/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141414/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141414/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + 
bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 
+train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + 
data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 
'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = 
'/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141806/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141806/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_141806/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = 
'/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + 
embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = 
None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), 
type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
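Each `vis_data/config.py` added in this patch is the full training configuration that MMEngine records under the timestamped work directory of a YOLO-World run; the timestamped dumps are identical copies of the YOLO-World detector configuration used in this repository. As a minimal sketch (assuming `mmengine` is installed and the relative path below matches your checkout), such a dump can be loaded and inspected like any other MMEngine config:

```
# Minimal sketch: parse one of the dumped YOLO-World configs and read back a few
# of the fields visible in the diff above. Assumes mmengine is installed and the
# script is run from the repository root (adjust cfg_path otherwise).
from mmengine.config import Config

cfg_path = (
    "pretrained/configs/"
    "yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/"
    "20240530_141806/vis_data/config.py"
)
cfg = Config.fromfile(cfg_path)

print(cfg.model.type)            # 'YOLOWorldDetector'
print(cfg.train_cfg.max_epochs)  # 100
print(cfg.val_evaluator.type)    # 'mmdet.LVISMetric'
```

Note that `Config.fromfile` only parses the file; actually building the detector from it would additionally require the `mmyolo` and `yolo_world` packages that the config pulls in through `default_scope` and `custom_imports`.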
diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142241/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142241/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142241/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], +
type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + 
featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + 
val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git 
a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142654/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142654/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142654/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, 
type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + 
num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False 
+save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + 
transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + 
), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 
960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142940/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142940/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_142940/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] 
+affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, 
type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 
+num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') 
+test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + 
dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', 
with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_143210/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_143210/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_143210/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + 
widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', 
+ dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', 
with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + 
max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_152552/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_152552/vis_data/config.py new file mode 
100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_152552/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports 
= dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + 
data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + 
nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + 
num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) 
+train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + 
], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153048/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153048/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153048/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset 
= dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], 
+ type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 
'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + 
filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153154/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153154/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_153154/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + 
scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + 
type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + 
meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 
0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 
'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155047/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155047/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ 
b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155047/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 
'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + 
type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + 
test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 
'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + 
dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', 
+ 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155226/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155226/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155226/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 
114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) 
+val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155818/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155818/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_155818/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', 
img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + 
offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') 
+optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + 
pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', 
+ ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') 
+tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_163015/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_163015/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_163015/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( +
allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + 
type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + 
-320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( 
+ 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + 
max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + 
name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_181107/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_181107/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_181107/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + 
dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + 
act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': 
dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + 
type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_190536/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_190536/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_190536/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + 
pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + 
cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 
'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), 
+ data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_212538/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_212538/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240530_212538/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + 
bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 
+train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + 
data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 
'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = 
'/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_115852/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_115852/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_115852/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = 
'/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + 
embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = 
None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), 
type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120013/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120013/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120013/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ 
+ dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = 
dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + 
type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + 
sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + 
type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + 
transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120058/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120058/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_120058/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + 
featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + 
val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git 
a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_132952/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_132952/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_132952/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None,
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + 
widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', 
+ dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', 
with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + 
max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_133132/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_133132/vis_data/config.py new file mode 
100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240531_133132/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports 
= dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + 
data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + 
nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + 
num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) 
+train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + 
], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190525/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190525/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190525/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset 
= dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], 
+ type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 
'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + 
filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190700/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190700/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_190700/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + 
scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + 
type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + 
meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 
0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 
'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_191828/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_191828/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ 
b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_191828/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 
'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + 
type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + 
test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 
'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + 
dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', 
+ 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192721/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192721/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192721/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 
114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) 
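The dump above fixes the detector to 80 sampled text prompts per training image (`num_train_classes`) and the 1203 LVIS categories at test time (`num_test_classes`), with test-time NMS at IoU 0.7, a 0.001 score threshold, and at most 300 boxes per image. As a rough editorial sketch (not part of the repository or of this diff), such a dumped config can be inspected and tweaked with mmengine's `Config` before a detector is built from it; the relative path below is the one added by this diff and may need adjusting to your checkout, and the override value is purely illustrative.

```
# Illustrative only: read the dumped YOLO-World config with mmengine and
# inspect/override a few of the fields recorded above.
from mmengine.config import Config

cfg = Config.fromfile(
    'pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_'
    'obj365v1_goldg_train_lvis_minival/20240601_192721/vis_data/config.py')

# Vocabulary sizes recorded in the dump: 80 prompts at train time,
# 1203 LVIS classes at test time.
print(cfg.model.num_train_classes, cfg.model.num_test_classes)

# Test-time settings (NMS IoU 0.7, score threshold 0.001, 300 boxes per image)
# can be overridden here before the detector is built from the config.
cfg.model.test_cfg.score_thr = 0.05
```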
+val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192803/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192803/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_192803/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
+tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
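The dumped `config.py` above is a plain MMEngine config describing the YOLO-World v2-X detector together with its training pipelines and LVIS-minival evaluation settings. As a minimal sketch of how such a dump can be consumed (assuming `mmengine`, MMDetection 3.x and the `yolo_world` package are installed; the config path is a placeholder, not a file name from this diff):

```python
# Minimal sketch, not code from this repository: load the dumped MMEngine config
# and build the detector it describes.
from mmengine.config import Config
from mmdet.apis import init_detector  # assumes mmdet 3.x with yolo_world importable

cfg = Config.fromfile('path/to/vis_data/config.py')  # placeholder path to the dump
print(cfg.model.type)   # 'YOLOWorldDetector'
print(cfg.load_from)    # pretrained YOLO-World checkpoint referenced by the config

# The config's custom_imports entry is expected to register the YOLO-World modules
# (YOLOWorldDetector, MultiModalYOLOBackbone, ...) so the registry can resolve them.
model = init_detector(cfg, checkpoint=cfg.load_from, device='cuda:0')
```

Evaluation then follows the `test_dataloader`/`test_evaluator` entries above (bbox mAP via `mmdet.LVISMetric` on `lvis_v1_minival_inserted_image_name.json`).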
name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_193823/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_193823/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_193823/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, +
pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + 
cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 
'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), 
+ data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_194509/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_194509/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_194509/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + 
bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 
+train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + 
data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 
'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = 
'/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195205/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195205/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195205/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = 
'/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + 
embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = 
None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), 
type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195352/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195352/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195352/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ 
+ dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = 
dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + 
type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + 
sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + 
type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + 
transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
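
The dump above ends at `work_dir`, which records where the YOLO-World run was launched. Because `load_from` and `work_dir` are absolute paths baked in on the training machine, they generally need to be repointed before the config is reused. Below is a minimal sketch of doing this with `mmengine`'s Config API, assuming `mmengine` is available from the Conda environment in the Installation guide; the exact dump path and the `yolo_world_v2_x_local.py` output name are illustrative assumptions, not fixed filenames.

```
from mmengine.config import Config

# Assumed path: any of the auto-generated vis_data/config.py dumps under pretrained/configs/ works.
cfg_path = ("pretrained/configs/"
            "yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/"
            "20240601_195607/vis_data/config.py")
cfg = Config.fromfile(cfg_path)

# A few fields that matter when the 2D detector is used for inference.
print(cfg.model.type)                  # 'YOLOWorldDetector'
print(cfg.model.test_cfg.score_thr)    # 0.001
print(cfg.model.test_cfg.max_per_img)  # 300
print(cfg.load_from)                   # absolute checkpoint path recorded at training time

# Repoint load_from at the locally downloaded checkpoint and save a cleaned copy,
# leaving the original auto-generated dump untouched (output name is hypothetical).
cfg.load_from = ("pretrained/checkpoints/"
                 "yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth")
cfg.dump("pretrained/configs/yolo_world_v2_x_local.py")
```

Dumping a separate cleaned copy keeps the original run record intact while giving the loader a config whose paths are valid on the local machine.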
), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 
960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195842/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195842/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_195842/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] 
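The repeated `YOLOv5KeepRatioResize` + `LetterResize` pairs above (at 640, 320 and 960) resize the image while preserving aspect ratio and pad the remainder with value 114. A minimal NumPy/OpenCV sketch of that letterboxing idea follows; it only illustrates the concept and is not the mmyolo transform implementation.

```
# Conceptual letterbox: scale so the image fits the square target, pad with 114.
# Illustration only; not the mmyolo KeepRatioResize/LetterResize code.
import numpy as np
import cv2

def letterbox(img: np.ndarray, size: int = 640, pad_val: int = 114) -> np.ndarray:
    h, w = img.shape[:2]
    scale = min(size / h, size / w)                 # keep aspect ratio
    nh, nw = int(round(h * scale)), int(round(w * scale))
    resized = cv2.resize(img, (nw, nh))             # cv2 expects (width, height)
    canvas = np.full((size, size, 3), pad_val, dtype=img.dtype)
    top, left = (size - nh) // 2, (size - nw) // 2
    canvas[top:top + nh, left:left + nw] = resized  # paste resized image, rest stays padded
    return canvas
```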
+affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, 
type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 
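The `model` block above freezes a Hugging Face CLIP text tower (`model_name='openai/clip-vit-base-patch32'`, `frozen_modules=['all']`) and feeds its 512-dimensional text embeddings (`text_channels = 512`) into the PAFPN and head as guidance. The sketch below shows how such per-class embeddings can be produced directly with the `transformers` library; the prompt list is made up, and this stands in for, but is not, the `HuggingCLIPLanguageBackbone` wrapper used by YOLO-World.

```
# Sketch: per-class text embeddings from the same frozen CLIP checkpoint named in
# the config. Prompts are made-up examples; this is not HuggingCLIPLanguageBackbone.
import torch
from transformers import CLIPTokenizer, CLIPTextModelWithProjection

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
text_encoder = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
text_encoder.eval()  # mirrors frozen_modules=['all'] in the config

prompts = ["a chair", "a table", "a sofa"]            # hypothetical open-vocabulary prompts
tokens = tokenizer(prompts, padding=True, return_tensors="pt")
with torch.no_grad():
    text_embeds = text_encoder(**tokens).text_embeds  # shape (3, 512) == text_channels
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)  # unit-norm guidance vectors
```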
+num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') 
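The evaluator above scores LVIS-1203 boxes against the class names listed in `data/texts/lvis_v1_class_texts.json`, and `class_text_path` is also the hook for supplying a custom vocabulary at inference time. The helper below assumes that file is a JSON list of per-class synonym lists, which is how the YOLO-World `*_class_texts.json` files appear to be laid out; the class names and output path are illustrative only.

```
# Hypothetical helper: write a custom class-text file in what appears to be the
# same layout as data/texts/lvis_v1_class_texts.json (one synonym list per class).
import json

custom_classes = [["chair"], ["table", "desk"], ["sofa", "couch"]]
with open("data/texts/custom_class_texts.json", "w") as f:
    json.dump(custom_classes, f)
# Point class_text_path at this file to prompt or evaluate with a custom vocabulary.
```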
+test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + 
dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', 
with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200002/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200002/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200002/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + 
widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', 
+ dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', 
with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + 
max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200106/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200106/vis_data/config.py new file mode 
100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200106/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports 
= dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + 
data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + 
nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + 
num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) 
+train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + 
], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200429/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200429/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200429/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset 
= dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], 
+ type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 
'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + 
filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200814/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200814/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_200814/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( +
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 
114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) 
+val_evaluator = dict(
+    ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json',
+    metric='bbox',
+    proposal_nums=(
+        100,
+        1,
+        10,
+    ),
+    type='mmdet.LVISMetric')
+val_interval_stage2 = 1
+val_num_workers = 2
+vis_backends = [
+    dict(type='LocalVisBackend'),
+]
+visualizer = dict(
+    name='visualizer',
+    type='mmdet.DetLocalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+    ])
+weight_decay = 0.025
+widen_factor = 1.25
+work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival'
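The `vis_data/config.py` dump that follows is the full mmengine configuration of the YOLO-World v2-X detector whose checkpoint is referenced by `load_from`. As a quick sanity check after downloading the configs and checkpoints, the dump can be loaded with `mmengine` and a few key fields inspected. This is only a minimal sketch, not part of the Open-YOLO 3D pipeline itself: it assumes `mmengine` is installed, and the `cfg_path` variable below is illustrative and should be adjusted to wherever the pretrained configs were extracted in your checkout.

```
from mmengine.config import Config

# Illustrative path; adjust to your local checkout layout.
cfg_path = (
    "pretrained/configs/"
    "yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/"
    "20240601_201621/vis_data/config.py"
)

# The dump is a plain-Python mmengine config, so it loads without extra imports.
cfg = Config.fromfile(cfg_path)

# A few of the fields defined in the dump.
print(cfg.model.type)              # 'YOLOWorldDetector'
print(cfg.model.num_train_classes) # 80 (training vocabulary size)
print(cfg.model.num_test_classes)  # 1203 (LVIS class texts used at test time)
print(cfg.img_scale)               # (640, 640)
print(cfg.load_from)               # path of the pretrained YOLO-World checkpoint
```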
diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201621/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201621/vis_data/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906
--- /dev/null
+++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201621/vis_data/config.py
@@ -0,0 +1,1366 @@
+_backend_args = None
+_multiscale_resize_transforms = [
+    dict(
+        transforms=[
+            dict(scale=(
+                640,
+                640,
+            ), type='YOLOv5KeepRatioResize'),
+
dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + 
scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + 
type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + 
pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', 
+ filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' 
+val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201813/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201813/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_201813/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + 
allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + 
type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + 
-320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( 
+ 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + 
max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + 
name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_203924/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_203924/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_203924/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + 
dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + 
act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': 
dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + 
max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + 
type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204301/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204301/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204301/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + 
pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + 
cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + 
max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 
'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), 
+ data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204356/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204356/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204356/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + 
dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + 
bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 
+train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + 
data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 
'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = 
'/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204855/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204855/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_204855/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = 
'/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + 
embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = 
None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), 
type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_205142/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_205142/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_205142/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ 
+ dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = 
dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + 
type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + 
sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + 
type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + 
transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210103/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210103/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210103/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + 
featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + 
keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + 
val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git 
a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210805/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210805/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210805/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + 
padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, 
type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + 
num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False 
+save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + 
transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + 
), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 
960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210916/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210916/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_210916/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] 
+affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + 
dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, 
type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 
+num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') 
+test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + 
dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', 
with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211138/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211138/vis_data/config.py new file mode
100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211138/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports 
= dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + 
data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + 
nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + 
num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) 
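+# train_dataloader (defined above): a ConcatDataset of Objects365v1 detection, Flickr grounding (GoldG), and GQA mixed-grounding data; all three branches share the MultiModalMosaic, YOLOv5RandomAffine, Albu, HSV-aug and RandomLoadText pipeline, and batches are collated with yolow_collate (batch_size=16, num_workers=8).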
+train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + 
], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211422/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211422/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211422/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset 
= dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], 
+ type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 
'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + 
filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + 
dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, 
type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 
'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + 
type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211642/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211642/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211642/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + 
scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + 
bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + 
type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + 
meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 
0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 
'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211827/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211827/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ 
b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_211827/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 
'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + 
pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + 
type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + 
test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 
'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + 
dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', 
+ 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_214018/vis_data/config.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_214018/vis_data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/20240601_214018/vis_data/config.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + 
ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 
114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 
'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + 
dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + 
dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) 
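+# Descriptive note on the evaluation setup configured above: the val/test loaders read
+# images from data/coco/ but are scored against the LVIS v1 "minival" annotations
+# (lvis_v1_minival_inserted_image_name.json), and the 1203 LVIS category names from
+# data/texts/lvis_v1_class_texts.json are supplied to the detector as text prompts.
+# The bbox metric reported by mmdet.LVISMetric below therefore measures open-vocabulary
+# LVIS performance of the pretrained YOLO-World detector used by Open-YOLO 3D.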
+val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b00f96ad70de81ecc9073c7d67025d43429906 --- /dev/null +++ b/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival.py @@ -0,0 +1,1366 @@ +_backend_args = None +_multiscale_resize_transforms = [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), +] +affine_scale = 0.9 +albu_train_transforms = [ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), +] +backend_args = None +base_lr = 0.002 +batch_shapes_cfg = None +close_mosaic_epochs = 2 +coco_val_dataset = dict( + _delete_=True, + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +custom_hooks = [ + dict( + ema_type='ExpMomentumEMA', + momentum=0.0001, + priority=49, + strict_load=False, + type='EMAHook', + update_buffers=True), + dict( + switch_epoch=98, + switch_pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + 
type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='mmdet.PipelineSwitchHook'), +] +custom_imports = dict( + allow_failed_imports=False, imports=[ + 'yolo_world', + ]) +data_root = 'data/coco/' +dataset_type = 'YOLOv5CocoDataset' +deepen_factor = 1.0 +default_hooks = dict( + checkpoint=dict( + interval=2, + max_keep_ckpts=2, + rule='greater', + save_best='auto', + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict( + lr_factor=0.01, + max_epochs=100, + scheduler_type='linear', + type='YOLOv5ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook'), + visualization=dict(type='mmdet.DetVisualizationHook')) +default_scope = 'mmyolo' +env_cfg = dict( + cudnn_benchmark=True, + dist_cfg=dict(backend='nccl'), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +flickr_train_dataset = dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +img_scale = ( + 640, + 640, +) +img_scales = [ + ( + 640, + 640, + ), + ( + 320, + 320, + ), + ( + 960, + 960, + ), +] +last_stage_out_channels = 512 +last_transform = [ + dict( + bbox_params=dict( + format='pascal_voc', 
+ label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), +] +load_from = '/home/jean/Amine/OpenYolo3D/pretrained/checkpoints/yolo_world_v2_x_obj365v1_goldg_cc3mlite_pretrain_1280ft-14996a36.pth' +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +loss_bbox_weight = 7.5 +loss_cls_weight = 0.5 +loss_dfl_weight = 0.375 +lr_factor = 0.01 +max_aspect_ratio = 100 +max_epochs = 100 +max_keep_ckpts = 2 +mg_train_dataset = dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset') +mixup_prob = 0.15 +model = dict( + backbone=dict( + image_model=dict( + act_cfg=dict(inplace=True, type='SiLU'), + arch='P5', + deepen_factor=1.0, + last_stage_out_channels=512, + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + type='YOLOv8CSPDarknet', + widen_factor=1.25), + text_model=dict( + frozen_modules=[ + 'all', + ], + model_name='openai/clip-vit-base-patch32', + type='HuggingCLIPLanguageBackbone'), + type='MultiModalYOLOBackbone'), + bbox_head=dict( + bbox_coder=dict(type='DistancePointBBoxCoder'), + head_module=dict( + act_cfg=dict(inplace=True, type='SiLU'), + embed_dims=512, + featmap_strides=[ + 8, + 16, + 32, + ], + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_classes=80, + reg_max=16, + type='YOLOWorldHeadModule', + use_bn_head=True, + widen_factor=1.25), + loss_bbox=dict( + bbox_format='xyxy', + iou_mode='ciou', + loss_weight=7.5, + reduction='sum', + return_iou=False, + type='IoULoss'), + loss_cls=dict( + loss_weight=0.5, + reduction='none', + type='mmdet.CrossEntropyLoss', + use_sigmoid=True), + 
loss_dfl=dict( + loss_weight=0.375, + reduction='mean', + type='mmdet.DistributionFocalLoss'), + prior_generator=dict( + offset=0.5, strides=[ + 8, + 16, + 32, + ], type='mmdet.MlvlPointGenerator'), + type='YOLOWorldHead'), + data_preprocessor=dict( + bgr_to_rgb=True, + mean=[ + 0.0, + 0.0, + 0.0, + ], + std=[ + 255.0, + 255.0, + 255.0, + ], + type='YOLOWDetDataPreprocessor'), + mm_neck=True, + neck=dict( + act_cfg=dict(inplace=True, type='SiLU'), + block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'), + deepen_factor=1.0, + embed_channels=[ + 128, + 256, + 256, + ], + guide_channels=512, + in_channels=[ + 256, + 512, + 512, + ], + norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'), + num_csp_blocks=3, + num_heads=[ + 4, + 8, + 8, + ], + out_channels=[ + 256, + 512, + 512, + ], + type='YOLOWorldPAFPN', + widen_factor=1.25), + num_test_classes=1203, + num_train_classes=80, + test_cfg=dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001), + train_cfg=dict( + assigner=dict( + alpha=0.5, + beta=6.0, + eps=1e-09, + num_classes=80, + topk=10, + type='BatchTaskAlignedAssigner', + use_ciou=True)), + type='YOLOWorldDetector') +model_test_cfg = dict( + max_per_img=300, + multi_label=True, + nms=dict(iou_threshold=0.7, type='nms'), + nms_pre=30000, + score_thr=0.001) +mosaic_affine_transform = [ + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='Mosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), +] +neck_embed_channels = [ + 128, + 256, + 256, +] +neck_num_heads = [ + 4, + 8, + 8, +] +norm_cfg = dict(eps=0.001, momentum=0.03, type='BN') +num_classes = 1203 +num_det_layers = 3 +num_training_classes = 80 +obj365v1_train_dataset = dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 
'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset') +optim_wrapper = dict( + clip_grad=dict(max_norm=10.0), + constructor='YOLOWv5OptimizerConstructor', + optimizer=dict( + batch_size_per_gpu=16, lr=0.002, type='AdamW', weight_decay=0.025), + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys=dict({ + 'backbone.text_model': dict(lr_mult=0.01), + 'logit_scale': dict(weight_decay=0.0) + }), + norm_decay_mult=0.0), + type='OptimWrapper') +param_scheduler = None +persistent_workers = True +pre_transform = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), +] +resume = False +save_epoch_intervals = 2 +strides = [ + 8, + 16, + 32, +] +tal_alpha = 0.5 +tal_beta = 6.0 +tal_topk = 10 +test_cfg = dict(type='TestLoop') +test_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +test_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +text_channels = 512 +text_transform = [ + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_ann_file = 'annotations/instances_train2017.json' +train_batch_size_per_gpu = 16 +train_cfg = dict( + dynamic_intervals=[ + ( + 98, + 1, + ), + ], + max_epochs=100, + type='EpochBasedTrainLoop', + val_interval=10) +train_data_prefix = 'train2017/' +train_dataloader = dict( + batch_size=16, + collate_fn=dict(type='yolow_collate'), + dataset=dict( + datasets=[ + dict( + class_text_path='data/texts/obj365v1_class_texts.json', + dataset=dict( + ann_file='annotations/objects365_train.json', + data_prefix=dict(img='train/'), + data_root='data/objects365v1/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + type='YOLOv5Objects365V1Dataset'), + pipeline=[ + dict(backend_args=None, 
type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + dict( + ann_file='annotations/final_flickr_separateGT_train.json', + data_prefix=dict(img='full_images/'), + data_root='data/flickr/', + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + dict( + ann_file='annotations/final_mixed_train_no_coco.json', + data_prefix=dict(img='gqa/images/'), + data_root='data/mixed_grounding/', + filter_cfg=dict(filter_empty_gt=False, min_size=32), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + 
type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='YOLOv5MixedGroundingDataset'), + ], + ignore_keys=[ + 'classes', + 'palette', + ], + type='ConcatDataset'), + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_num_workers = 8 +train_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + img_scale=( + 640, + 640, + ), + pad_val=114.0, + pre_transform=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + ], + type='MultiModalMosaic'), + dict( + border=( + -320, + -320, + ), + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + type='mmdet.PackDetInputs'), +] +train_pipeline_stage2 = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=True, + pad_val=dict(img=114.0), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict( + border_val=( + 114, + 114, + 114, + ), + max_aspect_ratio=100, + max_rotate_degree=0.0, + max_shear_degree=0.0, + scaling_ratio_range=( + 0.09999999999999998, + 1.9, + ), + type='YOLOv5RandomAffine'), + dict( + bbox_params=dict( + format='pascal_voc', + label_fields=[ + 'gt_bboxes_labels', + 'gt_ignore_flags', + ], + type='BboxParams'), + keymap=dict(gt_bboxes='bboxes', img='image'), + transforms=[ + dict(p=0.01, type='Blur'), + dict(p=0.01, type='MedianBlur'), + dict(p=0.01, type='ToGray'), + dict(p=0.01, type='CLAHE'), + ], + type='mmdet.Albu'), + dict(type='YOLOv5HSVRandomAug'), + dict(prob=0.5, type='mmdet.RandomFlip'), + dict( + max_num_samples=80, + num_neg_samples=( + 1203, + 1203, + ), + padding_to_max=True, + padding_value='', + type='RandomLoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'flip', + 'flip_direction', + 'texts', + ), + 
type='mmdet.PackDetInputs'), +] +tta_model = dict( + tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')), + type='mmdet.DetTTAModel') +tta_pipeline = [ + dict(backend_args=None, type='LoadImageFromFile'), + dict( + transforms=[ + [ + dict( + transforms=[ + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 320, + 320, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 320, + 320, + ), + type='LetterResize'), + ], + type='Compose'), + dict( + transforms=[ + dict(scale=( + 960, + 960, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 960, + 960, + ), + type='LetterResize'), + ], + type='Compose'), + ], + [ + dict(prob=1.0, type='mmdet.RandomFlip'), + dict(prob=0.0, type='mmdet.RandomFlip'), + ], + [ + dict(type='mmdet.LoadAnnotations', with_bbox=True), + ], + [ + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'flip', + 'flip_direction', + ), + type='mmdet.PackDetInputs'), + ], + ], + type='TestTimeAug'), +] +val_ann_file = 'annotations/instances_val2017.json' +val_batch_size_per_gpu = 1 +val_cfg = dict(type='ValLoop') +val_data_prefix = 'val2017/' +val_dataloader = dict( + batch_size=1, + dataset=dict( + class_text_path='data/texts/lvis_v1_class_texts.json', + dataset=dict( + ann_file='lvis/lvis_v1_minival_inserted_image_name.json', + batch_shapes_cfg=None, + data_prefix=dict(img=''), + data_root='data/coco/', + test_mode=True, + type='YOLOv5LVISV1Dataset'), + pipeline=[ + dict(backend_args=None, type='LoadImageFromFile'), + dict(scale=( + 640, + 640, + ), type='YOLOv5KeepRatioResize'), + dict( + allow_scale_up=False, + pad_val=dict(img=114), + scale=( + 640, + 640, + ), + type='LetterResize'), + dict(_scope_='mmdet', type='LoadAnnotations', with_bbox=True), + dict(type='LoadText'), + dict( + meta_keys=( + 'img_id', + 'img_path', + 'ori_shape', + 'img_shape', + 'scale_factor', + 'pad_param', + 'texts', + ), + type='mmdet.PackDetInputs'), + ], + type='MultiModalDataset'), + drop_last=False, + num_workers=2, + persistent_workers=True, + pin_memory=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict( + ann_file='data/coco/lvis/lvis_v1_minival_inserted_image_name.json', + metric='bbox', + proposal_nums=( + 100, + 1, + 10, + ), + type='mmdet.LVISMetric') +val_interval_stage2 = 1 +val_num_workers = 2 +vis_backends = [ + dict(type='LocalVisBackend'), +] +visualizer = dict( + name='visualizer', + type='mmdet.DetLocalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ]) +weight_decay = 0.025 +widen_factor = 1.25 +work_dir = '/home/jean/Amine/OpenYolo3D/pretrained/configs/yolo_world_v2_x_vlpan_bn_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_minival' diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..357965670d85ef060f4c520fd1fac298d0dec549 --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1,448 @@ +from utils.utils_3d import Network_3D +from utils.utils_2d import Network_2D, load_yaml +import time +import torch +import os +import os.path as osp +import imageio +import glob +import open3d as o3d +import numpy as np +import math +from models.Mask3D.mask3d import load_mesh, load_ply +import colorsys +from tqdm import tqdm + 
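+# Overview of the helpers defined below:
+# - get_iou: pairwise IoU between binary 3D instance masks; the union is computed
+#   in row batches to keep GPU memory bounded.
+# - apply_nms: greedy, score-sorted non-maximum suppression over the 3D masks.
+# - generate_vibrant_colors: evenly spaced HSV hues for visualizing instances.
+# - get_visibility_mat: for each 3D proposal, keep the top-k frames in which the
+#   largest fraction of its points is visible.
+# - compute_iou: IoU between one reference 2D box and a set of boxes (xyxy format).
+# - OpenYolo3D: labels class-agnostic 3D proposals by projecting them onto the
+#   selected frames and reading the per-pixel label maps built from the 2D
+#   detector's bounding boxes.
+# - WORLD_2_CAM: loads per-frame poses, intrinsics, depth and color images, and
+#   projects the scene point cloud into each frame with a depth-based visibility test.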
+def get_iou(masks): + masks = masks.float() + intersection = torch.einsum('ij,kj -> ik', masks, masks) + num_masks = masks.shape[0] + masks_batch_size = 2 # scannet 200: 20 + if masks_batch_size < num_masks: + ratio = num_masks//masks_batch_size + remaining = num_masks-ratio*masks_batch_size + start_masks = list(range(0,ratio*masks_batch_size, masks_batch_size)) + if remaining == 0: + end_masks = list(range(masks_batch_size,(ratio+1)*masks_batch_size,masks_batch_size)) + else: + end_masks = list(range(masks_batch_size,(ratio+1)*masks_batch_size,masks_batch_size)) + end_masks[-1] = num_masks + else: + start_masks = [0] + end_masks = [num_masks] + union = torch.cat([((masks[st:ed, None, :]+masks[None, :, :]) >= 1).sum(-1) for st,ed in zip(start_masks, end_masks)]) + iou = torch.div(intersection,union) + + return iou + +def apply_nms(masks, scores, nms_th): + masks = masks.permute(1,0) + scored_sorted, sorted_scores_indices = torch.sort(scores, descending=True) + inv_sorted_scores_indices = {sorted_id.item(): id for id, sorted_id in enumerate(sorted_scores_indices)} + maskes_sorted = masks[sorted_scores_indices] + iou = get_iou(maskes_sorted) + available_indices = torch.arange(len(scored_sorted)) + for indx in range(len(available_indices)): + remove_indices = torch.where(iou[indx,indx+1:] > nms_th)[0] + available_indices[indx+1:][remove_indices] = 0 + remaining = available_indices.unique() + keep_indices = torch.tensor([inv_sorted_scores_indices[id.item()] for id in remaining]) + return keep_indices + +def generate_vibrant_colors(num_colors): + colors = [] + hue_increment = 1.0 / num_colors + saturation = 1.0 + value = 1.0 + + for i in range(num_colors): + hue = i * hue_increment + rgb = colorsys.hsv_to_rgb(hue, saturation, value) + colors.append(rgb) + + return colors + +def get_visibility_mat(pred_masks_3d, inside_mask, topk = 15): + intersection = torch.einsum("ik, fk -> if", pred_masks_3d.float(), inside_mask.float()) + total_point_number = pred_masks_3d[:, None, :].float().sum(dim = -1) + visibility_matrix = intersection/total_point_number + + if topk > visibility_matrix.shape[-1]: + topk = visibility_matrix.shape[-1] + + max_visiblity_in_frame = torch.topk(visibility_matrix, topk, dim = -1).indices + + visibility_matrix_bool = torch.zeros_like(visibility_matrix).bool() + visibility_matrix_bool[torch.tensor(range(len(visibility_matrix_bool)))[:, None],max_visiblity_in_frame] = True + + return visibility_matrix_bool + +def compute_iou(box, boxes): + assert box.shape == (4,), "Reference box must be of shape (4,)" + assert boxes.shape[1] == 4, "Boxes must be of shape (N, 4)" + + x1_inter = torch.max(box[0], boxes[:, 0]) + y1_inter = torch.max(box[1], boxes[:, 1]) + x2_inter = torch.min(box[2], boxes[:, 2]) + y2_inter = torch.min(box[3], boxes[:, 3]) + inter_area = (x2_inter - x1_inter).clamp(0) * (y2_inter - y1_inter).clamp(0) + box_area = (box[2] - box[0]) * (box[3] - box[1]) + boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + union_area = box_area + boxes_area - inter_area + iou = inter_area / union_area + + return iou + +class OpenYolo3D(): + def __init__(self, openyolo3d_config = ""): + config = load_yaml(openyolo3d_config) + self.network_3d = Network_3D(config) + self.network_2d = Network_2D(config) + self.openyolo3d_config = config + + def predict(self, path_2_scene_data, depth_scale, processed_scene = None, path_to_3d_masks = None, is_gt=False): + self.world2cam = WORLD_2_CAM(path_2_scene_data, depth_scale, self.openyolo3d_config) + self.mesh_projections = 
self.world2cam.get_mesh_projections() + self.scaling_params = [self.world2cam.depth_resolution[0]/self.world2cam.image_resolution[0], self.world2cam.depth_resolution[1]/self.world2cam.image_resolution[1]] + + scene_name = path_2_scene_data.split("/")[-1] + print("[🚀 ACTION] 3D mask proposals computation ...") + start = time.time() + + if path_to_3d_masks is None: + self.preds_3d = self.network_3d.get_class_agnostic_masks(self.world2cam.mesh) if processed_scene is None else self.network_3d.get_class_agnostic_masks(processed_scene) + keep_score = self.preds_3d[1] >= self.openyolo3d_config["network3d"]["th"] + keep_nms = apply_nms(self.preds_3d[0][:, keep_score].cuda(), self.preds_3d[1][keep_score].cuda(), self.openyolo3d_config["network3d"]["nms"]) + self.preds_3d = (self.preds_3d[0].cpu().permute(1,0)[keep_score][keep_nms].permute(1,0), self.preds_3d[1].cpu()[keep_score][keep_nms]) + else: + self.preds_3d = torch.load(osp.join(path_to_3d_masks, f"{scene_name}.pt")) + + print(f"[🕒 INFO] Elapsed time {(time.time()-start)}") + print(f"[✅ INFO] Proposals computed.") + + print("[🚀 ACTION] 2D Bounding Boxes computation ...") + start = time.time() + self.preds_2d = self.network_2d.get_bounding_boxes(self.world2cam.color_paths) + # self.preds_2d = torch.load(osp.join(f"/share/data/drive_3/OpenYolo3D/bboxes_2d", f"{scene_name}.pt")) + print(f"[🕒 INFO] Elapsed time {(time.time()-start)}") + print(f"[✅ INFO] Bounding boxes computed.") + + print("[🚀 ACTION] Predicting ...") + start = time.time() + prediction = self.label_3d_masks_from_2d_bboxes(scene_name, is_gt) + print(f"[🕒 INFO] Elapsed time {(time.time()-start)}") + print(f"[✅ INFO] Prediction completed") + + return prediction + + def label_3d_masks_from_2d_bboxes(self, scene_name, is_gt=False): + projections_mesh_to_frame , keep_visible_points = self.mesh_projections + predictions_2d_bboxes = self.preds_2d + prediction_3d_masks, _ = self.preds_3d + + predicted_masks, predicated_classes, predicated_scores = self.label_3d_masks_from_label_maps(prediction_3d_masks.bool(), + predictions_2d_bboxes, + projections_mesh_to_frame, + keep_visible_points, + is_gt) + + self.predicted_masks = predicted_masks + self.predicated_scores = predicated_scores + self.predicated_classes = predicated_classes + + return {scene_name : (predicted_masks, predicated_classes, predicated_scores)} + + + def label_3d_masks_from_label_maps(self, + prediction_3d_masks, + predictions_2d_bboxes, + projections_mesh_to_frame, + keep_visible_points, + is_gt): + + label_maps = self.construct_label_maps(predictions_2d_bboxes) #construct the label maps , start from the biggest bbox to small one + + visibility_matrix = get_visibility_mat(prediction_3d_masks.cuda().permute(1,0), keep_visible_points.cuda(), topk = 25 if is_gt else self.openyolo3d_config["openyolo3d"]["topk"]) + valid_frames = visibility_matrix.sum(dim=0) >= 1 + + prediction_3d_masks = prediction_3d_masks.permute(1,0).cpu() + prediction_3d_masks_np = prediction_3d_masks.numpy() + projections_mesh_to_frame = projections_mesh_to_frame[valid_frames].cpu().numpy() + visibility_matrix = visibility_matrix[:, valid_frames].cpu().numpy() + keep_visible_points = keep_visible_points[valid_frames].cpu().numpy() + distributions = [] + + class_labels = [] + class_probs = [] + class_dists = [] + label_maps = label_maps[valid_frames].numpy() + bounding_boxes = predictions_2d_bboxes.values() + bounding_boxes_valid = [bbox for (bi, bbox) in enumerate(bounding_boxes) if valid_frames[bi]] + for mask_id, mask in 
enumerate(prediction_3d_masks_np): + prob_normalizer = 0 + + representitive_frame_ids = np.where(visibility_matrix[mask_id])[0] + labels_distribution = [] + iou_vals = [] + for representitive_frame_id in representitive_frame_ids: + visible_points_mask = (keep_visible_points[representitive_frame_id].squeeze()*mask).astype(bool) + prob_normalizer += visible_points_mask.sum() + instance_x_y_coords = projections_mesh_to_frame[representitive_frame_id][np.where(visible_points_mask)].astype(np.int64) + + boxes = bounding_boxes_valid[representitive_frame_id]["bbox"].long() + if len(boxes) > 0 and len(instance_x_y_coords > 10): + x_l, x_r, y_t, y_b = instance_x_y_coords[:, 0].min(), instance_x_y_coords[:, 0].max()+1, instance_x_y_coords[:, 1].min(), instance_x_y_coords[:, 1].max()+1 + box = torch.tensor([x_l, y_t, x_r, y_b]) + + iou_values = compute_iou(box, boxes) + iou_vals.append(iou_values.max().item()) + selected_labels = label_maps[representitive_frame_id, instance_x_y_coords[:, 1], instance_x_y_coords[:, 0]] + labels_distribution.append(selected_labels) + + labels_distribution = np.concatenate(labels_distribution) if len(labels_distribution) > 0 else np.array([-1]) + + # class_dists.append(labels_distribution) + distribution = torch.zeros(self.openyolo3d_config["openyolo3d"]["num_classes"]) if self.openyolo3d_config["openyolo3d"]["topk_per_image"] != -1 else None + if (labels_distribution != -1).sum() != 0: + + if distribution is not None: + all_labels = torch.from_numpy(labels_distribution[labels_distribution != -1]) + all_labels_unique = all_labels.unique() + for lb in all_labels_unique: + distribution[lb] = (all_labels == lb).sum() + + distribution = distribution/distribution.max() + + class_label = torch.mode(torch.from_numpy(labels_distribution[labels_distribution != -1])).values.item() + class_prob = (labels_distribution == class_label).sum()/prob_normalizer + else: + if distribution is not None: + distribution[-1] = 1.0 + class_label = -1 + class_prob = 0.0 + + iou_vals = torch.tensor(iou_vals) + + class_labels.append(class_label) + if (iou_vals != 0).sum(): + iou_prob = iou_vals[iou_vals != 0].mean().item() + else: + iou_prob = 0.0 + + class_probs.append(class_prob*iou_prob) + if distribution is not None: + distributions.append(distribution) + + pred_classes = torch.tensor(class_labels) + pred_scores = torch.tensor(class_probs) + if distribution is not None: + distributions = torch.stack(distributions) if len(distributions) > 0 else torch.tensor((0, self.openyolo3d_config["openyolo3d"]["num_classes"])) + + if (self.openyolo3d_config["openyolo3d"]["topk_per_image"] != -1) and (not is_gt): + # print("TOPK USED") + n_instance = distributions.shape[0] + distributions = distributions.reshape(-1) + labels = ( + torch.arange(self.openyolo3d_config["openyolo3d"]["num_classes"], device=distributions.device) + .unsqueeze(0) + .repeat(n_instance, 1) + .flatten(0, 1) + ) + + cur_topk = self.openyolo3d_config["openyolo3d"]["topk_per_image"] + _, idx = torch.topk(distributions, k=min(cur_topk, len(distributions)), largest=True) + mask_idx = torch.div(idx, self.openyolo3d_config["openyolo3d"]["num_classes"], rounding_mode="floor") + + pred_classes = labels[idx] + pred_scores = distributions[idx].cuda() + prediction_3d_masks = prediction_3d_masks[mask_idx] + + return prediction_3d_masks.permute(1,0), pred_classes, pred_scores + + def construct_label_maps(self, predictions_2d_bboxes, save_label_map=False): + label_maps = (torch.ones((len(predictions_2d_bboxes), self.world2cam.height, 
self.world2cam.width))*-1).type(torch.int16) + for frame_id, pred in enumerate(predictions_2d_bboxes.values()): + bboxes = pred["bbox"].long() + labels = pred["labels"].type(torch.int16) + + bboxes[:,0] = bboxes[:,0]*self.scaling_params[1] + bboxes[:,2] = bboxes[:,2]*self.scaling_params[1] + bboxes[:,1] = bboxes[:,1]*self.scaling_params[0] + bboxes[:,3] = bboxes[:,3]*self.scaling_params[0] + bboxes_weights = (bboxes[:,2]-bboxes[:,0])+(bboxes[:,3]-bboxes[:,1]) + sorted_indices = bboxes_weights.sort(descending=True).indices + bboxes = bboxes[sorted_indices] + labels = labels[sorted_indices] + for id, bbox in enumerate(bboxes): + label_maps[frame_id, bbox[1]:bbox[3],bbox[0]:bbox[2]] = labels[id] + + return label_maps + + def save_output_as_ply(self, save_path, highest_score = True): + if highest_score : + th = self.predicated_scores.max() + else: + th = self.predicated_scores.max()-0.1 + + mesh = load_mesh(self.world2cam.mesh) + vertex_colors = np.asarray(mesh.vertex_colors) + vibrant_colors = generate_vibrant_colors(len(self.predicated_scores[self.predicated_scores >= th])) + color_id = 0 + for i, class_id in enumerate(self.predicated_classes): + if self.predicated_scores[i] < th: + continue + if len(vibrant_colors) == 0: + break + mask = self.predicted_masks.permute(1,0)[i] + vertex_colors[mask] = np.array(vibrant_colors.pop()) + color_id += 1 + mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors) + o3d.io.write_triangle_mesh(save_path, mesh) + + + +class WORLD_2_CAM(): + def __init__(self, path_2_scene, depth_scale, openyolo3d_config = None): + self.poses = {} + self.intrinsics = {} + self.meshes = {} + self.depth_maps_paths = {} + self.depth_color_paths = {} + self.vis_depth_threshold = openyolo3d_config["openyolo3d"]['vis_depth_threshold'] + + frequency = openyolo3d_config["openyolo3d"]['frequency'] + + path_2_poses = osp.join(path_2_scene,"poses") + num_frames = len(os.listdir(path_2_poses)) + self.poses = [osp.join(path_2_poses, f"{i}.txt") for i in list(range(num_frames))[::frequency]] + + path_2_intrinsics = osp.join(path_2_scene,"intrinsics.txt") + self.intrinsics = [path_2_intrinsics for i in list(range(num_frames))[::frequency]] + + self.mesh = glob.glob(path_2_scene+"/*.ply")[0] + + path_2_depth = osp.join(path_2_scene,"depth") + self.depth_maps_paths = [osp.join(path_2_depth, f"{i}.png") for i in list(range(num_frames))[::frequency]] + + path_2_color = osp.join(path_2_scene,"color") + self.color_paths = [osp.join(path_2_color, f"{i}.jpg") for i in list(range(num_frames))[::frequency]] + + + self.image_resolution = imageio.imread(list(self.color_paths)[0]).shape[:2] + self.depth_resolution = imageio.imread(list(self.depth_maps_paths)[0]).shape + self.height = self.depth_resolution[0] + self.width = self.depth_resolution[1] + + self.depth_scale = depth_scale + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + @staticmethod + def load_ply(path_2_mesh): + pcd = o3d.io.read_point_cloud(path_2_mesh) + points = np.asarray(pcd.points) + colors = np.asarray(pcd.colors) + # print(points.shape) + coords = np.concatenate([points, np.ones((points.shape[0], 1))], axis = -1) + return coords, colors + + def load_depth_maps(self): + depth_maps = [] + paths_to_depth_maps_scene_i = self.depth_maps_paths + for depth_map_path_i in paths_to_depth_maps_scene_i: + depth_path = os.path.join(depth_map_path_i) + depth_maps.append(torch.from_numpy(imageio.imread(depth_path) / self.depth_scale).to(self.device)) + return torch.stack(depth_maps) + + def 
adjust_intrinsic(self, intrinsic, original_resolution, new_resolution): + if original_resolution == new_resolution: + return intrinsic + + resize_width = int(math.floor(new_resolution[1] * float( + original_resolution[0]) / float(original_resolution[1]))) + + adapted_intrinsic = intrinsic.copy() + adapted_intrinsic[0, 0] *= float(resize_width) / float(original_resolution[0]) + adapted_intrinsic[1, 1] *= float(new_resolution[1]) / float(original_resolution[1]) + adapted_intrinsic[0, 2] *= float(new_resolution[0] - 1) / float(original_resolution[0] - 1) + adapted_intrinsic[1, 2] *= float(new_resolution[1] - 1) / float(original_resolution[1] - 1) + return adapted_intrinsic + + def get_mesh_projections(self): + N_Large = 2000000*250 + + points, colors = self.load_ply(self.mesh) + points, colors = torch.from_numpy(points).cuda(), torch.from_numpy(colors).cuda() + + intrinsic = self.adjust_intrinsic(np.loadtxt(self.intrinsics[0]), self.image_resolution, self.depth_resolution) + intrinsics = torch.from_numpy(np.stack([intrinsic for frame_id in range(len(self.poses))])).cuda() + extrinsics = torch.linalg.inv(torch.from_numpy(np.stack([np.loadtxt(pose) for pose in self.poses])).cuda()) + + if extrinsics.shape[0]*points.shape[0] < N_Large: + word2cam_mat = torch.einsum('bij, jk -> bik',torch.einsum('bij,bjk -> bik', intrinsics,extrinsics), points.T).permute(0,2,1) + else: + B_size = 800000 + Num_Points = points.shape[0] + Num_batches = Num_Points//B_size+1 + word2cam_mat = [] + for b_i in range(Num_batches): + dim_start = b_i*B_size + dim_last = (b_i+1)*B_size if b_i != Num_batches-1 else points.shape[0] + word2cam_mat_i = torch.einsum('bij, jk -> bik',torch.einsum('bij,bjk -> bik', intrinsics,extrinsics), points[dim_start:dim_last].T).permute(0,2,1) + word2cam_mat.append(word2cam_mat_i.cpu()) + word2cam_mat = torch.cat(word2cam_mat, dim = 1) + del intrinsics + del extrinsics + del points + del colors + torch.cuda.empty_cache() + + point_depth = word2cam_mat[:, :, 2].cuda() + if word2cam_mat.shape[1]*word2cam_mat.shape[0] < N_Large: + size = (word2cam_mat.shape[0], word2cam_mat.shape[1]) + mask = (word2cam_mat[:, :, 2] != 0).reshape(size[0]*size[1]) + + projected_points = torch.stack([(word2cam_mat[:, :, 0].reshape(size[0]*size[1])[mask]/word2cam_mat[:, :, 2].reshape(size[0]*size[1])[mask]).reshape(size), + (word2cam_mat[:, :, 1].reshape(size[0]*size[1])[mask]/word2cam_mat[:, :, 2].reshape(size[0]*size[1])[mask]).reshape(size)]).permute(1,2,0).long() + inside_mask = ((projected_points[:,:,0] < self.width)*(projected_points[:,:,0] > 0)*(projected_points[:,:,1] < self.height)*(projected_points[:,:,1] >0) == 1 ) + + else: + B_size = 200000 + Num_Points = word2cam_mat.shape[1] + Num_batches = Num_Points//B_size+1 + projected_points = [] + + for b_i in range(Num_batches): + dim_start = b_i*B_size + dim_last = (b_i+1)*B_size if b_i != Num_batches-1 else word2cam_mat.shape[1] + batch_z = word2cam_mat[:, dim_start:dim_last, 2].cuda() + batch_y = word2cam_mat[:, dim_start:dim_last, 1].cuda() + batch_x = word2cam_mat[:, dim_start:dim_last, 0].cuda() + + size = (word2cam_mat.shape[0], dim_last-dim_start) + mask = (batch_z != 0).reshape(size[0]*size[1]) + projected_points_i = torch.stack([(torch.div(batch_x.reshape(size[0]*size[1])[mask],batch_z.reshape(size[0]*size[1])[mask])).reshape(size), + (torch.div(batch_y.reshape(size[0]*size[1])[mask],batch_z.reshape(size[0]*size[1])[mask])).reshape(size)]).permute(1,2,0).long() + projected_points.append(projected_points_i.cpu()) + + + + # merge parts + 
projected_points = torch.cat(projected_points, dim = 1) + inside_mask = ((projected_points[:,:,0] < self.width)*(projected_points[:,:,0] > 0)*(projected_points[:,:,1] < self.height)*(projected_points[:,:,1] >0) == 1 ) + + + # Get visible points with depth, width, and height + depth_maps = self.load_depth_maps() + num_frames = depth_maps.shape[0] + # pixel_to_3d_point = [] + for frame_id in range(num_frames): + points_in_frame_mask = inside_mask[frame_id].clone() + points_in_frame = (projected_points[frame_id][points_in_frame_mask]) + depth_in_frame = point_depth[frame_id][points_in_frame_mask] + visibility_mask = (torch.abs(depth_maps[frame_id][points_in_frame[:,1].long(), points_in_frame[:,0].long()] + - depth_in_frame) <= \ + self.vis_depth_threshold) + + inside_mask[frame_id][points_in_frame_mask] = visibility_mask.to(inside_mask.device) + + return projected_points.type(torch.int16).cpu(), inside_mask.cpu() \ No newline at end of file diff --git a/utils/utils_2d.py b/utils/utils_2d.py new file mode 100644 index 0000000000000000000000000000000000000000..37d9ebc62d4d0aa217f97f5b5ab84642327c6794 --- /dev/null +++ b/utils/utils_2d.py @@ -0,0 +1,110 @@ +# Copyright (c) Tencent Inc. All rights reserved. +import os +import cv2 +import os.path as osp +from torchvision.ops import nms +import torch +from mmengine.runner.amp import autocast +from tqdm import tqdm +import yaml +from PIL import Image +from mmengine.dataset import Compose +from mmyolo.registry import RUNNERS +from mmengine.config import Config, DictAction +from mmengine.runner import Runner +import supervision as sv + +def load_yaml(path): + with open(path) as stream: + try: + config = yaml.safe_load(stream) + except yaml.YAMLError as exc: + print(exc) + return config + +def get_image_resolution(image_path): + """ + Get the resolution of an image. 
+ + :param image_path: Path to the image file + :return: A tuple containing the width and height of the image + """ + with Image.open(image_path) as img: + width, height = img.size + return width, height + +class Network_2D(): + def __init__(self, config): + self.texts = [[t] for t in config["network2d"]["text_prompts"]] + [[' ']] + self.topk = config["network2d"]["topk"] + self.th = config["network2d"]["th"] + self.nms = config["network2d"]["nms"] + self.use_amp = config["network2d"]["use_amp"] + self.resolution = None + self.frequency = config["openyolo3d"]["frequency"] + cfg = Config.fromfile(os.path.join(os.getcwd(), config["network2d"]["config_path"])) + cfg.work_dir = osp.join(f'{os.getcwd()}/models/YOLO-World/yolo_world/work_dirs', + osp.splitext(config["network2d"]["config_path"])[0].split("/")[-1]) + cfg.load_from = os.path.join(os.getcwd(), config["network2d"]["pretrained_path"]) + if 'runner_type' not in cfg: + self.runner = Runner.from_cfg(cfg) + else: + self.runner = RUNNERS.build(cfg) + + self.runner.call_hook('before_run') + self.runner.load_or_resume() + pipeline = cfg.test_dataloader.dataset.pipeline + self.runner.pipeline = Compose(pipeline) + self.runner.model.eval() + + def get_bounding_boxes(self, path_2_images): + print(f"Infering from {len(path_2_images)} images") + + scene_preds = {} + for image_path in tqdm(path_2_images): + frame_prediction = self.inference_detector([image_path]) + scene_preds.update(frame_prediction) + return scene_preds + + def inference_detector(self, images_batch): + if self.resolution is None: + self.resolution = get_image_resolution(images_batch[0]) + inputs = [] + data_samples = [] + for img_id, image_path in enumerate(images_batch): + data_info = dict(img_id=img_id, img_path=image_path, texts=self.texts) + data_info = self.runner.pipeline(data_info) + inputs.append(data_info['inputs']) + data_samples.append(data_info['data_samples']) + + + data_batch = dict(inputs=torch.stack(inputs), + data_samples=data_samples) + + with autocast(enabled=self.use_amp), torch.no_grad(): + output = self.runner.model.test_step(data_batch) + frame_prediction = {} + + for img_id, image_path in enumerate(images_batch): + with autocast(enabled=self.use_amp), torch.no_grad(): + pred_instances = output[img_id].pred_instances + keep = nms(pred_instances.bboxes, pred_instances.scores, iou_threshold=self.nms) + pred_instances = pred_instances[keep] + pred_instances = pred_instances[pred_instances.scores.float() > self.th] + + if len(pred_instances.scores) > self.topk: + indices = pred_instances.scores.float().topk(self.topk)[1] + pred_instances = pred_instances[indices] + mask = ~(((pred_instances['bboxes'][:,2]-pred_instances['bboxes'][:,0] > self.resolution[0]-50)*(pred_instances['bboxes'][:,3]-pred_instances['bboxes'][:,1] > self.resolution[1]-50)) == 1) + bboxes_ = pred_instances['bboxes'][mask].cpu() + labels_ = pred_instances['labels'][mask].cpu() + scores_ = pred_instances['scores'][mask].cpu() + frame_id = osp.basename(image_path).split(".")[0] + + frame_prediction.update({frame_id:{"bbox":bboxes_, "labels":labels_, "scores":scores_}}) + + return frame_prediction + + + + diff --git a/utils/utils_3d.py b/utils/utils_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..950e88c27f9f211fabd16c2ff5fb69226bae604e --- /dev/null +++ b/utils/utils_3d.py @@ -0,0 +1,19 @@ + +import sys +sys.path.append("..") +from models.Mask3D.mask3d import get_model, load_mesh, prepare_data, map_output_to_pointcloud, save_colorized_mesh +import torch + +class 
Network_3D(): + def __init__(self, config): + self.model = get_model(config["network3d"]["pretrained_path"]) + self.model.eval() + self.device = torch.device("cuda:0") + self.model.to(self.device) + + def get_class_agnostic_masks(self, pointcloud_file, point2segment=None): + data, points, colors, features, unique_map, inverse_map, point2segment, point2segment_full = prepare_data(pointcloud_file, self.device) + with torch.no_grad(): + outputs = self.model(data, raw_coordinates=features, point2segment=[point2segment] if point2segment is not None else None) + return map_output_to_pointcloud(outputs, inverse_map, point2segment, point2segment_full) + \ No newline at end of file